| Column | Type | Length / value range |
|---|---|---|
| function_name | string | 1 – 63 chars |
| docstring | string | 50 – 5.89k chars |
| masked_code | string | 50 – 882k chars |
| implementation | string | 169 – 12.9k chars |
| start_line | int32 | 1 – 14.6k |
| end_line | int32 | 16 – 14.6k |
| file_content | string | 274 – 882k chars |

**Sample row**

function_name: `tns_close_short_pos`

docstring (translated from Chinese):

> Transactionally close a short position.
> 1. Triggered by stop-loss / take-profit closing.
> 2. Triggered by rolling the position to the new main contract.
> Logic: if the account's yesterday position (昨仓) covers the volume to close, close it directly; otherwise create a lock (hedge) grid.
> :param grid: the grid to close (平仓网格)
> :return:
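The docstring describes a yesterday-position check: close directly when yesterday's short volume covers the requested amount, otherwise hedge with a lock grid. Below is a minimal sketch of that decision rule only, mirroring the condition used by `tns_close_long_pos` in the file; `grid_pos`, `volume` and `activate_today_lock` follow the file's naming, and this is an illustration of the rule, not the masked implementation:

```python
# Decision rule only, not the masked tns_close_short_pos body.
def close_short_or_lock(grid_pos, volume: float, activate_today_lock: bool) -> str:
    """'close': yesterday's short position suffices, close it directly.
    'lock' : otherwise create an opposite LOCK_GRID (hedge) position."""
    yd_ok = (grid_pos.short_yd >= volume > 0
             and grid_pos.long_td == 0 and grid_pos.short_td == 0)
    return 'close' if yd_ok or not activate_today_lock else 'lock'
```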
file_content:
""""""
import os
import uuid
import bz2
import pickle
import traceback
import zlib
from abc import ABC
from copy import copy, deepcopy
from typing import Any, Callable
from logging import INFO, ERROR
from datetime import datetime
from vnpy.trader.constant import Interval, Direction, Offset, Status, OrderType, Color, Exchange
from vnpy.trader.object import BarData, TickData, OrderData, TradeData
from vnpy.trader.utility import virtual, append_data, extract_vt_symbol, get_underlying_symbol
from .base import StopOrder, EngineType
from vnpy.component.cta_grid_trade import CtaGrid, CtaGridTrade, LOCK_GRID
from vnpy.component.cta_position import CtaPosition
from vnpy.component.cta_policy import CtaPolicy # noqa
class CtaTemplate(ABC):
"""CTA策略模板"""
author = ""
parameters = []
variables = []
# 保存委托单编号和相关委托单的字典
# key为委托单编号
# value为该合约相关的委托单
active_orders = {}
def __init__(
self,
cta_engine: Any,
strategy_name: str,
vt_symbol: str,
setting: dict,
):
""""""
self.cta_engine = cta_engine
self.strategy_name = strategy_name
self.vt_symbol = vt_symbol
self.inited = False # 是否初始化完毕
self.trading = False # 是否开始交易
self.pos = 0 # 持仓/仓差
self.entrust = 0 # 是否正在委托, 0, 无委托 , 1, 委托方向是LONG, -1, 委托方向是SHORT
self.tick_dict = {} # 记录所有on_tick传入最新tick
self.active_orders = {}
# Copy a new variables list here to avoid duplicate insert when multiple
# strategy instances are created with the same strategy class.
self.variables = copy(self.variables)
self.variables.insert(0, "inited")
self.variables.insert(1, "trading")
self.variables.insert(2, "pos")
self.variables.insert(3, "entrust")
def update_setting(self, setting: dict):
"""
Update strategy parameters with values in setting dict.
"""
for name in self.parameters:
if name in setting:
setattr(self, name, setting[name])
@classmethod
def get_class_parameters(cls):
"""
Get default parameters dict of strategy class.
"""
class_parameters = {}
for name in cls.parameters:
class_parameters[name] = getattr(cls, name)
return class_parameters
def get_parameters(self):
"""
Get strategy parameters dict.
"""
strategy_parameters = {}
for name in self.parameters:
strategy_parameters[name] = getattr(self, name)
return strategy_parameters
def get_variables(self):
"""
Get strategy variables dict.
"""
strategy_variables = {}
for name in self.variables:
strategy_variables[name] = getattr(self, name)
return strategy_variables
def get_data(self):
"""
Get strategy data.
"""
strategy_data = {
"strategy_name": self.strategy_name,
"vt_symbol": self.vt_symbol,
"class_name": self.__class__.__name__,
"author": self.author,
"parameters": self.get_parameters(),
"variables": self.get_variables(),
}
return strategy_data
def get_positions(self):
""" 返回持仓数量"""
pos_list = []
if self.pos > 0:
pos_list.append({
"vt_symbol": self.vt_symbol,
"direction": "long",
"volume": self.pos
})
elif self.pos < 0:
pos_list.append({
"vt_symbol": self.vt_symbol,
"direction": "short",
"volume": abs(self.pos)
})
return pos_list
@virtual
def on_timer(self):
pass
@virtual
def on_init(self):
"""
Callback when strategy is inited.
"""
pass
@virtual
def on_start(self):
"""
Callback when strategy is started.
"""
pass
@virtual
def on_stop(self):
"""
Callback when strategy is stopped.
"""
pass
@virtual
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
pass
@virtual
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
pass
@virtual
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
pass
@virtual
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
pass
@virtual
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
pass
def buy(self, price: float, volume: float, stop: bool = False, lock: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send buy order to open a long position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_upper_limit(vt_symbol):
self.write_error(u'涨停价不做FAK/FOK委托')
return []
if volume == 0:
self.write_error(f'委托数量有误,必须大于0,{vt_symbol}, price:{price}')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.LONG,
offset=Offset.OPEN,
price=price,
volume=volume,
stop=stop,
lock=lock,
order_type=order_type,
order_time=order_time,
grid=grid)
def sell(self, price: float, volume: float, stop: bool = False, lock: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send sell order to close a long position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_lower_limit(vt_symbol):
self.write_error(u'跌停价不做FAK/FOK sell委托')
return []
if volume == 0:
self.write_error(f'委托数量有误,必须大于0,{vt_symbol}, price:{price}')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.SHORT,
offset=Offset.CLOSE,
price=price,
volume=volume,
stop=stop,
lock=lock,
order_type=order_type,
order_time=order_time,
grid=grid)
def short(self, price: float, volume: float, stop: bool = False, lock: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send short order to open a short position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_lower_limit(vt_symbol):
self.write_error(u'跌停价不做FAK/FOK short委托')
return []
if volume == 0:
self.write_error(f'委托数量有误,必须大于0,{vt_symbol}, price:{price}')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.SHORT,
offset=Offset.OPEN,
price=price,
volume=volume,
stop=stop,
lock=lock,
order_type=order_type,
order_time=order_time,
grid=grid)
def cover(self, price: float, volume: float, stop: bool = False, lock: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send cover order to close a short position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_upper_limit(vt_symbol):
self.write_error(u'涨停价不做FAK/FOK cover委托')
return []
if volume == 0:
self.write_error(f'委托数量有误,必须大于0,{vt_symbol}, price:{price}')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.LONG,
offset=Offset.CLOSE,
price=price,
volume=volume,
stop=stop,
lock=lock,
order_type=order_type,
order_time=order_time,
grid=grid)
def send_order(
self,
vt_symbol: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
stop: bool = False,
lock: bool = False,
order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None,
grid: CtaGrid = None
):
"""
Send a new order.
"""
# 兼容cta_strategy的模板,缺省不指定vt_symbol时,使用策略配置的vt_symbol
if vt_symbol == '':
vt_symbol = self.vt_symbol
if not self.trading:
self.write_log(f'非交易状态')
return []
vt_orderids = self.cta_engine.send_order(
strategy=self,
vt_symbol=vt_symbol,
direction=direction,
offset=offset,
price=price,
volume=volume,
stop=stop,
lock=lock,
order_type=order_type
)
if len(vt_orderids) == 0:
self.write_error(f'{self.strategy_name}调用cta_engine.send_order委托返回失败,vt_symbol:{vt_symbol}')
# f',direction:{direction.value},offset:{offset.value},'
# f'price:{price},volume:{volume},stop:{stop},lock:{lock},'
# f'order_type:{order_type}')
if order_time is None:
order_time = datetime.now()
for vt_orderid in vt_orderids:
d = {
'direction': direction,
'offset': offset,
'vt_symbol': vt_symbol,
'price': price,
'volume': volume,
'order_type': order_type,
'traded': 0,
'order_time': order_time,
'status': Status.SUBMITTING
}
if grid:
d.update({'grid': grid})
grid.order_ids.append(vt_orderid)
self.active_orders.update({vt_orderid: d})
if direction == Direction.LONG:
self.entrust = 1
elif direction == Direction.SHORT:
self.entrust = -1
return vt_orderids
def cancel_order(self, vt_orderid: str):
"""
Cancel an existing order.
"""
if self.trading:
return self.cta_engine.cancel_order(self, vt_orderid)
return False
def cancel_all(self):
"""
Cancel all orders sent by strategy.
"""
if self.trading:
self.cta_engine.cancel_all(self)
def is_upper_limit(self, symbol):
"""是否涨停"""
tick = self.tick_dict.get(symbol, None)
if tick is None or tick.limit_up is None or tick.limit_up == 0:
return False
if tick.bid_price_1 == tick.limit_up:
return True
return False
def is_lower_limit(self, symbol):
"""是否跌停"""
tick = self.tick_dict.get(symbol, None)
if tick is None or tick.limit_down is None or tick.limit_down == 0:
return False
if tick.ask_price_1 == tick.limit_down:
return True
return False
def write_log(self, msg: str, level: int = INFO):
"""
Write a log message.
"""
self.cta_engine.write_log(msg=msg, strategy_name=self.strategy_name, level=level)
def write_error(self, msg: str):
"""write error log message"""
self.write_log(msg=msg, level=ERROR)
def get_engine_type(self):
"""
Return whether the cta_engine is backtesting or live trading.
"""
return self.cta_engine.get_engine_type()
def load_bar(
self,
days: int,
interval: Interval = Interval.MINUTE,
callback: Callable = None,
interval_num: int = 1
):
"""
Load historical bar data for initializing strategy.
"""
if not callback:
callback = self.on_bar
self.cta_engine.load_bar(self.vt_symbol, days, interval, callback, interval_num)
def load_tick(self, days: int):
"""
Load historical tick data for initializing strategy.
"""
self.cta_engine.load_tick(self.vt_symbol, days, self.on_tick)
def put_event(self):
"""
Put a strategy data event for ui update.
"""
if self.inited:
self.cta_engine.put_strategy_event(self)
def send_email(self, msg):
"""
Send email to default receiver.
"""
if self.inited:
self.cta_engine.send_email(msg, self)
def sync_data(self):
"""
Sync strategy variables value into disk storage.
"""
if self.trading:
self.cta_engine.sync_strategy_data(self)
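# --- Illustrative usage sketch (not part of the original vnpy file) ----------
# buy()/sell()/short()/cover() above all funnel into send_order(), which asks
# self.cta_engine for vt_orderids, records each one in self.active_orders and
# sets self.entrust to the committed direction. A minimal demonstration with a
# hypothetical stand-in engine (DemoStrategy / FakeEngine are made-up names):
class FakeEngine:
    """Bare-bones stand-in for the CTA engine, enough for this sketch."""

    def send_order(self, **kwargs):
        return ["demo.1"]  # pretend the gateway accepted one order

    def write_log(self, msg, strategy_name="", level=INFO):
        print(f"[{strategy_name}] {msg}")


class DemoStrategy(CtaTemplate):
    author = "example"


if __name__ == "__main__":
    demo = DemoStrategy(FakeEngine(), "demo", "rb2410.SHFE", {})
    demo.trading = True
    demo.buy(price=3500.0, volume=1)        # -> send_order() -> active_orders
    assert "demo.1" in demo.active_orders   # bookkeeping done by send_order()
    assert demo.entrust == 1                # pending direction flag set to LONG
# ------------------------------------------------------------------------------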
class CtaSignal(ABC):
""""""
def __init__(self):
""""""
self.signal_pos = 0
@virtual
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
pass
@virtual
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
pass
def set_signal_pos(self, pos):
""""""
self.signal_pos = pos
def get_signal_pos(self):
""""""
return self.signal_pos
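# --- Illustrative sketch (not part of the original vnpy file) ----------------
# CtaSignal is a tiny holder: subclasses compute a direction in on_bar()/on_tick()
# and publish it via set_signal_pos(); a strategy later reads it back with
# get_signal_pos(). CloseAboveOpenSignal below is a hypothetical example name:
class CloseAboveOpenSignal(CtaSignal):
    """Toy signal: +1 when a bar closes above its open, otherwise -1."""

    def on_bar(self, bar: BarData):
        self.set_signal_pos(1 if bar.close_price > bar.open_price else -1)
# ------------------------------------------------------------------------------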
class TargetPosTemplate(CtaTemplate):
""""""
tick_add = 1
last_tick = None
last_bar = None
target_pos = 0
vt_orderids = []
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super(TargetPosTemplate, self).__init__(
cta_engine, strategy_name, vt_symbol, setting
)
self.variables.append("target_pos")
@virtual
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
self.last_tick = tick
if self.trading:
self.trade()
@virtual
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
self.last_bar = bar
@virtual
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
vt_orderid = order.vt_orderid
if not order.is_active() and vt_orderid in self.vt_orderids:
self.vt_orderids.remove(vt_orderid)
def set_target_pos(self, target_pos):
""""""
self.target_pos = target_pos
self.trade()
def trade(self):
""""""
self.cancel_all()
pos_change = self.target_pos - self.pos
if not pos_change:
return
long_price = 0
short_price = 0
if self.last_tick:
if pos_change > 0:
long_price = self.last_tick.ask_price_1 + self.tick_add
if self.last_tick.limit_up:
long_price = min(long_price, self.last_tick.limit_up)
else:
short_price = self.last_tick.bid_price_1 - self.tick_add
if self.last_tick.limit_down:
short_price = max(short_price, self.last_tick.limit_down)
else:
if pos_change > 0:
long_price = self.last_bar.close_price + self.tick_add
else:
short_price = self.last_bar.close_price - self.tick_add
if self.get_engine_type() == EngineType.BACKTESTING:
if pos_change > 0:
vt_orderids = self.buy(long_price, abs(pos_change))
else:
vt_orderids = self.short(short_price, abs(pos_change))
self.vt_orderids.extend(vt_orderids)
else:
if self.vt_orderids:
return
if pos_change > 0:
if self.pos < 0:
if pos_change < abs(self.pos):
vt_orderids = self.cover(long_price, pos_change)
else:
vt_orderids = self.cover(long_price, abs(self.pos))
else:
vt_orderids = self.buy(long_price, abs(pos_change))
else:
if self.pos > 0:
if abs(pos_change) < self.pos:
vt_orderids = self.sell(short_price, abs(pos_change))
else:
vt_orderids = self.sell(short_price, abs(self.pos))
else:
vt_orderids = self.short(short_price, abs(pos_change))
self.vt_orderids.extend(vt_orderids)
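# --- Illustrative sketch (not part of the original vnpy file) ----------------
# TargetPosTemplate.trade() above boils down to: pos_change = target_pos - pos;
# buys are priced at ask_price_1 + tick_add capped at limit_up, sells at
# bid_price_1 - tick_add floored at limit_down. The same pricing rule restated
# as a standalone helper (plain floats instead of TickData, illustration only):
def _target_pos_order_price(pos_change: float, ask: float, bid: float,
                            tick_add: float, limit_up: float = 0.0,
                            limit_down: float = 0.0) -> float:
    """Return the aggressive limit price trade() would use for this rebalance."""
    if pos_change > 0:
        price = ask + tick_add
        return min(price, limit_up) if limit_up else price
    price = bid - tick_add
    return max(price, limit_down) if limit_down else price
# ------------------------------------------------------------------------------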
class CtaProTemplate(CtaTemplate):
"""
增强模板
"""
idx_symbol = None # 指数合约
exchange = Exchange.LOCAL
price_tick = 1 # 商品的最小价格跳动
symbol_size = 10 # 商品得合约乘数
margin_rate = 0.1 # 商品的保证金
# 委托类型
order_type = OrderType.LIMIT
cancel_seconds = 120 # 撤单时间(秒)
# 资金相关
max_invest_rate = 0.1 # 最大仓位(0~1)
max_invest_margin = 0 # 资金上限 0,不限制
max_invest_pos = 0 # 单向头寸数量上限 0,不限制
# 是否回测状态
backtesting = False
# 逻辑过程日志
dist_fieldnames = ['datetime', 'symbol', 'volume', 'price',
'operation', 'signal', 'stop_price', 'target_price',
'long_pos', 'short_pos']
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
self.position = None # 仓位组件
self.policy = None # 事务执行组件
self.gt = None # 网格交易组件
self.klines = {} # K线组件字典: kline_name: kline
self.cur_datetime = None # 当前Tick时间
self.cur_mi_tick = None # 最新的主力合约tick( vt_symbol)
self.cur_99_tick = None # 最新得指数合约tick( idx_symbol)
self.cur_mi_price = None # 当前价(主力合约 vt_symbol)
self.cur_99_price = None # 当前价(tick时,根据tick更新,onBar回测时,根据bar.close更新)
self.last_minute = None # 最后的分钟,用于on_tick内每分钟处理的逻辑
super(CtaProTemplate, self).__init__(
cta_engine, strategy_name, vt_symbol, setting
)
# 增加仓位管理模块
self.position = CtaPosition(strategy=self)
# 增加网格持久化模块
self.gt = CtaGridTrade(strategy=self)
# 增加指数合约
if 'idx_symbol' not in self.parameters:
self.parameters.append('idx_symbol')
if 'backtesting' not in self.parameters:
self.parameters.append('backtesting')
def update_setting(self, setting: dict):
"""
Update strategy parameters with values in setting dict.
"""
for name in self.parameters:
if name in setting:
setattr(self, name, setting[name])
symbol, self.exchange = extract_vt_symbol(self.vt_symbol)
if self.idx_symbol is None:
self.idx_symbol = get_underlying_symbol(symbol).upper() + '99.' + self.exchange.value
if self.vt_symbol != self.idx_symbol:
self.write_log(f'指数合约:{self.idx_symbol}, 主力合约:{self.vt_symbol}')
self.price_tick = self.cta_engine.get_price_tick(self.vt_symbol)
self.symbol_size = self.cta_engine.get_size(self.vt_symbol)
self.margin_rate = self.cta_engine.get_margin_rate(self.vt_symbol)
def sync_data(self):
"""同步更新数据"""
if not self.backtesting:
self.write_log(u'保存k线缓存数据')
self.save_klines_to_cache()
if self.inited and self.trading:
self.write_log(u'保存policy数据')
self.policy.save()
def save_klines_to_cache(self, kline_names: list = []):
"""
保存K线数据到缓存
:param kline_names: 一般为self.klines的keys
:return:
"""
if len(kline_names) == 0:
kline_names = list(self.klines.keys())
# 获取保存路径
save_path = self.cta_engine.get_data_path()
# 保存缓存的文件名
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_klines.pkb2'))
with bz2.BZ2File(file_name, 'wb') as f:
klines = {}
for kline_name in kline_names:
kline = self.klines.get(kline_name, None)
# if kline:
# kline.strategy = None
# kline.cb_on_bar = None
klines.update({kline_name: kline})
pickle.dump(klines, f)
def load_klines_from_cache(self, kline_names: list = []):
"""
从缓存加载K线数据
:param kline_names:
:return:
"""
if len(kline_names) == 0:
kline_names = list(self.klines.keys())
save_path = self.cta_engine.get_data_path()
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_klines.pkb2'))
try:
last_bar_dt = None
with bz2.BZ2File(file_name, 'rb') as f:
klines = pickle.load(f)
# 逐一恢复K线
for kline_name in kline_names:
# 缓存的k线实例
cache_kline = klines.get(kline_name, None)
# 当前策略实例的K线实例
strategy_kline = self.klines.get(kline_name, None)
if cache_kline and strategy_kline:
# 临时保存当前的回调函数
cb_on_bar = strategy_kline.cb_on_bar
# 缓存实例数据 =》 当前实例数据
strategy_kline.__dict__.update(cache_kline.__dict__)
kline_first_bar_dt = None
kline_last_bar_dt = None
if len(strategy_kline.line_bar) > 0:
kline_first_bar_dt = strategy_kline.line_bar[0].datetime
kline_last_bar_dt = strategy_kline.line_bar[-1].datetime
# 所有K线的最后时间
if last_bar_dt and strategy_kline.cur_datetime:
last_bar_dt = max(last_bar_dt, strategy_kline.cur_datetime)
else:
last_bar_dt = strategy_kline.cur_datetime
# 重新绑定k线策略与on_bar回调函数
strategy_kline.strategy = self
strategy_kline.cb_on_bar = cb_on_bar
self.write_log(f'恢复{kline_name}缓存数据:[{kline_first_bar_dt}] => [{kline_last_bar_dt}], bar结束时间:{last_bar_dt}')
self.write_log(u'加载缓存k线数据完毕')
return last_bar_dt
except Exception as ex:
self.write_error(f'加载缓存K线数据失败:{str(ex)}')
return None
def get_klines_snapshot(self):
"""返回当前klines的切片数据"""
try:
d = {
'strategy': self.strategy_name,
'datetime': datetime.now()}
klines = {}
for kline_name in sorted(self.klines.keys()):
klines.update({kline_name: self.klines.get(kline_name).get_data()})
kline_names = list(klines.keys())
binary_data = zlib.compress(pickle.dumps(klines))
d.update({'kline_names': kline_names, 'klines': binary_data, 'zlib': True})
return d
except Exception as ex:
self.write_error(f'获取klines切片数据失败:{str(ex)}')
return {}
def init_position(self):
"""
初始化Position
使用网格的持久化,获取开仓状态的多空单,更新
:return:
"""
self.write_log(u'init_position(),初始化持仓')
pos_symbols = set()
remove_ids = []
if len(self.gt.up_grids) <= 0:
self.position.short_pos = 0
# 加载已开仓的空单数据,网格JSON
short_grids = self.gt.load(direction=Direction.SHORT, open_status_filter=[True])
if len(short_grids) == 0:
self.write_log(u'没有持久化的空单数据')
self.gt.up_grids = []
else:
self.gt.up_grids = short_grids
for sg in short_grids:
if len(sg.order_ids) > 0 or sg.order_status:
self.write_log(f'重置委托状态:{sg.order_status},清除委托单:{sg.order_ids}')
sg.order_status = False
sg.order_ids = []
short_symbol = sg.snapshot.get('mi_symbol', self.vt_symbol)
if sg.traded_volume > 0:
if sg.open_status and sg.volume== sg.traded_volume:
msg = f'{self.strategy_name} {short_symbol}空单持仓{sg.volume},已成交:{sg.traded_volume},不加载'
self.write_log(msg)
self.send_wechat(msg)
remove_ids.append(sg.id)
continue
pos_symbols.add(short_symbol)
self.write_log(u'加载持仓空单[ID:{},vt_symbol:{},价格:{}],[指数:{},价格:{}],数量:{}手'
.format(sg.id, short_symbol, sg.snapshot.get('open_price'),
self.idx_symbol, sg.open_price, sg.volume))
self.position.short_pos -= sg.volume
self.write_log(u'持久化空单,共持仓:{}手'.format(abs(self.position.short_pos)))
if len(remove_ids) > 0:
self.gt.remove_grids_by_ids(direction=Direction.SHORT,ids=remove_ids)
remove_ids = []
if len(self.gt.dn_grids) <= 0:
# 加载已开仓的多数据,网格JSON
self.position.long_pos = 0
long_grids = self.gt.load(direction=Direction.LONG, open_status_filter=[True])
if len(long_grids) == 0:
self.write_log(u'没有持久化的多单数据')
self.gt.dn_grids = []
else:
self.gt.dn_grids = long_grids
for lg in long_grids:
if len(lg.order_ids) > 0 or lg.order_status:
self.write_log(f'重置委托状态:{lg.order_status},清除委托单:{lg.order_ids}')
lg.order_status = False
lg.order_ids = []
# lg.type = self.line.name
long_symbol = lg.snapshot.get('mi_symbol', self.vt_symbol)
if lg.traded_volume > 0:
if lg.open_status and lg.volume == lg.traded_volume:
msg = f'{self.strategy_name} {long_symbol}多单持仓{lg.volume},已成交:{lg.traded_volume},不加载'
self.write_log(msg)
self.send_wechat(msg)
remove_ids.append(lg.id)
continue
pos_symbols.add(long_symbol)
self.write_log(u'加载持仓多单[ID:{},vt_symbol:{},价格:{}],[指数{},价格:{}],数量:{}手'
.format(lg.id, long_symbol, lg.snapshot.get('open_price'),
self.idx_symbol, lg.open_price, lg.volume))
self.position.long_pos += lg.volume
self.write_log(f'持久化多单,共持仓:{self.position.long_pos}手')
if len(remove_ids) > 0:
self.gt.remove_grids_by_ids(direction=Direction.LONG,ids=remove_ids)
self.position.pos = self.position.long_pos + self.position.short_pos
self.write_log(u'{}加载持久化数据完成,多单:{},空单:{},共:{}手'
.format(self.strategy_name,
self.position.long_pos,
abs(self.position.short_pos),
self.position.pos))
self.pos = self.position.pos
self.gt.save()
self.display_grids()
#if not self.backtesting:
if len(self.vt_symbol) > 0 and self.vt_symbol not in pos_symbols:
pos_symbols.add(self.vt_symbol)
if self.idx_symbol and self.idx_symbol not in pos_symbols:
pos_symbols.add(self.idx_symbol)
# 如果持仓的合约,不在self.vt_symbol中,需要订阅
for symbol in list(pos_symbols):
self.write_log(f'新增订阅合约:{symbol}')
self.cta_engine.subscribe_symbol(strategy_name=self.strategy_name, vt_symbol=symbol)
def get_positions(self):
"""
获取策略当前持仓(重构,使用主力合约)
:return: [{'vt_symbol': symbol, 'direction': direction, 'volume': volume}]
"""
if not self.position:
return []
pos_list = []
if self.position.long_pos > 0:
for g in self.gt.get_opened_grids(direction=Direction.LONG):
vt_symbol = g.snapshot.get('mi_symbol', g.vt_symbol if g.vt_symbol and '99' not in g.vt_symbol else self.vt_symbol)
open_price = g.snapshot.get('open_price', g.open_price)
pos_list.append({'vt_symbol': vt_symbol,
'direction': 'long',
'volume': g.volume - g.traded_volume,
'price': open_price})
if abs(self.position.short_pos) > 0:
for g in self.gt.get_opened_grids(direction=Direction.SHORT):
vt_symbol = g.snapshot.get('mi_symbol', g.vt_symbol if g.vt_symbol and '99' not in g.vt_symbol else self.vt_symbol)
open_price = g.snapshot.get('open_price', g.open_price)
pos_list.append({'vt_symbol': vt_symbol,
'direction': 'short',
'volume': abs(g.volume - g.traded_volume),
'price': open_price})
if self.cur_datetime and (datetime.now() - self.cur_datetime).total_seconds() < 10:
self.write_log(u'当前持仓:{}'.format(pos_list))
return pos_list
def get_policy_json(self):
"""获取policy的json格式数据"""
if not self.policy:
return None
data = self.policy.to_json()
return data
def get_grid_trade_json(self):
"""获取gt组件的json格式数据"""
if not self.gt:
return None
data = self.gt.to_json()
return data
def tns_cancel_logic(self, dt, force=False):
"撤单逻辑"""
if len(self.active_orders) < 1:
self.entrust = 0
return
for vt_orderid in list(self.active_orders.keys()):
order_info = self.active_orders.get(vt_orderid)
order_grid = order_info.get('grid',None)
if order_info.get('status', None) in [Status.CANCELLED, Status.REJECTED]:
self.active_orders.pop(vt_orderid, None)
continue
order_time = order_info.get('order_time')
over_ms = (dt - order_time).total_seconds()
# 白天开盘或许有指数与真实tick的时间延迟,这个时刻不做撤单功能
if f'{dt.hour}:{dt.minute}' in ['10:30', '13:30']:
continue
if (over_ms > self.cancel_seconds) \
or force: # 超过设置的时间还未成交
self.write_log(f'{dt}, 超时{over_ms}秒未成交,取消委托单:{order_info}')
if self.cancel_order(vt_orderid):
order_info.update({'status': Status.CANCELLING})
else:
order_info.update({'status': Status.CANCELLED})
if order_grid:
if vt_orderid in order_grid.order_ids:
order_grid.order_ids.remove(vt_orderid)
if len(order_grid.order_ids) == 0:
order_grid.order_status = False
if len(self.active_orders) < 1:
self.entrust = 0
def tns_switch_long_pos(self, open_new=True):
"""
切换合约,从持仓的非主力合约,切换至主力合约
:param open_new: 是否开仓主力合约
:return:
"""
if self.entrust != 0:
return
if self.position.long_pos == 0:
return
if self.cur_mi_price == 0:
return
none_mi_grid = None
none_mi_symbol = None
self.write_log(f'持仓换月=>启动.')
# 找出非主力合约的持仓网格
for g in self.gt.get_opened_grids(direction=Direction.LONG):
none_mi_symbol = g.snapshot.get('mi_symbol', g.vt_symbol)
# 如果持仓的合约,跟策略配置的vt_symbol一致,则不处理
if none_mi_symbol is None or none_mi_symbol == self.vt_symbol:
self.write_log(f'none_mi_symbol:{none_mi_symbol}, vt_symbol:{self.vt_symbol} 一致,不处理')
continue
# 如果未开仓,或者处于委托状态,或者已交易完毕,不处理
if not g.open_status or g.order_status or g.volume - g.traded_volume <= 0:
self.write_log(f'开仓状态:{g.open_status}, 委托状态:{g.order_status},网格持仓:{g.volume} ,已交易数量:{g.traded_volume}, 不处理')
continue
none_mi_grid = g
if g.traded_volume > 0 and g.volume - g.traded_volume > 0:
g.volume -= g.traded_volume
g.traded_volume = 0
break
if none_mi_grid is None:
return
self.write_log(f'持仓换月=>找到多单持仓:{none_mi_symbol},持仓数量:{none_mi_grid.volume}')
# 找到行情中非主力合约/主力合约的最新价
none_mi_tick = self.tick_dict.get(none_mi_symbol)
mi_tick = self.tick_dict.get(self.vt_symbol, None)
if none_mi_tick is None or mi_tick is None:
return
# 如果涨停价,不做卖出
if self.is_upper_limit(none_mi_symbol) or self.is_upper_limit(self.vt_symbol):
self.write_log(f'{none_mi_symbol} 或 {self.vt_symbol} 为涨停价,不做换仓')
return
none_mi_price = max(none_mi_tick.last_price, none_mi_tick.bid_price_1)
grid = deepcopy(none_mi_grid)
grid.id = str(uuid.uuid1())
grid.open_status = False
self.write_log(f'持仓换月=>复制持仓信息{none_mi_symbol},ID:{none_mi_grid.id} => {self.vt_symbol},ID:{grid.id}')
# 委托卖出非主力合约
vt_orderids = self.sell(price=none_mi_price,
volume=none_mi_grid.volume,
vt_symbol=none_mi_symbol,
order_type=self.order_type,
grid=none_mi_grid)
if len(vt_orderids) > 0:
self.write_log(f'持仓换月=>委托卖出非主力合约{none_mi_symbol}持仓:{none_mi_grid.volume}')
# 已经发生过换月的,不执行买入新合约
if none_mi_grid.snapshot.get("switched", False):
self.write_log(f'持仓换月=>已经执行过换月,不再创建新的买入操作')
return
none_mi_grid.snapshot.update({'switched': True})
# 如果不买入新主力合约,直接返回
# 某些策略会自动重新开仓得
if not open_new:
self.write_log(f'不买入新的主力合约:{self.vt_symbol},数量:{grid.volume}')
self.gt.save()
return
# 添加买入主力合约
grid.snapshot.update({'mi_symbol': self.vt_symbol, 'open_price': self.cur_mi_price})
self.gt.dn_grids.append(grid)
vt_orderids = self.buy(price=self.cur_mi_price + 5 * self.price_tick,
volume=grid.volume,
vt_symbol=self.vt_symbol,
order_type=self.order_type,
grid=grid)
if len(vt_orderids) > 0:
self.write_log(u'持仓换月=>委托买入主力合约:{},价格:{},数量:{}'
.format(self.vt_symbol, self.cur_mi_price, grid.volume))
else:
self.write_error(f'持仓换月=>委托买入主力合约:{self.vt_symbol}失败')
self.gt.save()
else:
self.write_error(f'持仓换月=>委托卖出非主力合约:{none_mi_symbol}失败')
def tns_switch_short_pos(self,open_new=True):
"""
切换合约,从持仓的非主力合约,切换至主力合约
:param open_new: 是否开仓新得主力合约
:return:
"""
if self.entrust != 0:
return
if self.position.short_pos == 0:
return
if self.cur_mi_price == 0:
return
none_mi_grid = None
none_mi_symbol = None
# 找出非主力合约的持仓网格
for g in self.gt.get_opened_grids(direction=Direction.SHORT):
none_mi_symbol = g.snapshot.get('mi_symbol')
if none_mi_symbol is None or none_mi_symbol == self.vt_symbol:
continue
if not g.open_status or g.order_status or g.volume - g.traded_volume <= 0:
continue
none_mi_grid = g
if g.traded_volume > 0 and g.volume - g.traded_volume > 0:
g.volume -= g.traded_volume
g.traded_volume = 0
break
# 找不到与主力合约不一致的持仓网格
if none_mi_grid is None:
return
# 找到行情中非主力合约的最新价
none_mi_tick = self.tick_dict.get(none_mi_symbol)
mi_tick = self.tick_dict.get(self.vt_symbol, None)
if none_mi_tick is None or mi_tick is None:
return
# 如果跌停价,不做cover
if self.is_lower_limit(none_mi_symbol) or self.is_lower_limit(self.vt_symbol):
return
none_mi_price = max(none_mi_tick.last_price, none_mi_tick.bid_price_1)
grid = deepcopy(none_mi_grid)
grid.id = str(uuid.uuid1())
# 委托平空非主力合约
vt_orderids = self.cover(price=none_mi_price,
volume=none_mi_grid.volume,
vt_symbol=none_mi_symbol,
order_type=self.order_type,
grid=none_mi_grid)
if len(vt_orderids) > 0:
self.write_log(f'委托平空非主力合约{none_mi_symbol}持仓:{none_mi_grid.volume}')
# 已经发生过换月的,不执行开空新合约
if none_mi_grid.snapshot.get("switched", False):
self.write_log(f'已经执行过换月,不再创建新的空操作')
return
none_mi_grid.snapshot.update({'switched': True})
# 如果不开空新主力合约,直接返回
# 某些策略会自动重新开仓得
if not open_new:
self.write_log(f'不开空新的主力合约:{self.vt_symbol},数量:{grid.volume}')
self.gt.save()
return
# 添加卖出主力合约
grid.id = str(uuid.uuid1())
grid.snapshot.update({'mi_symbol': self.vt_symbol, 'open_price': self.cur_mi_price})
self.gt.up_grids.append(grid)
vt_orderids = self.short(price=self.cur_mi_price,
volume=grid.volume,
vt_symbol=self.vt_symbol,
order_type=self.order_type,
grid=grid)
if len(vt_orderids) > 0:
self.write_log(f'委托做空主力合约:{self.vt_symbol},价格:{self.cur_mi_price},数量:{grid.volume}')
else:
self.write_error(f'委托做空主力合约:{self.vt_symbol}失败')
self.gt.save()
else:
self.write_error(f'委托平空非主力合约:{none_mi_symbol}失败')
def display_grids(self):
"""更新网格显示信息"""
if not self.inited:
return
up_grids_info = self.gt.to_str(direction=Direction.SHORT)
if len(self.gt.up_grids) > 0:
self.write_log(up_grids_info)
dn_grids_info = self.gt.to_str(direction=Direction.LONG)
if len(self.gt.dn_grids) > 0:
self.write_log(dn_grids_info)
def display_tns(self):
"""显示事务的过程记录=》 log"""
if not self.inited:
return
self.write_log(u'{} 当前指数{}价格:{},当前主力{}价格:{}'
.format(self.cur_datetime,
self.idx_symbol, self.cur_99_price,
self.vt_symbol, self.cur_mi_price))
if hasattr(self, 'policy'):
policy = getattr(self, 'policy')
op = getattr(policy, 'to_json', None)
if callable(op):
self.write_log(u'当前Policy:{}'.format(policy.to_json()))
def save_dist(self, dist_data):
"""
保存策略逻辑过程记录=》 csv文件
:param dist_data:
:return:
"""
if self.backtesting:
save_path = self.cta_engine.get_logs_path()
else:
save_path = self.cta_engine.get_data_path()
try:
if self.position and 'long_pos' not in dist_data:
dist_data.update({'long_pos': self.position.long_pos})
if self.position and 'short_pos' not in dist_data:
dist_data.update({'short_pos': self.position.short_pos})
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_dist.csv'))
append_data(file_name=file_name, dict_data=dist_data, field_names=self.dist_fieldnames)
except Exception as ex:
self.write_error(u'save_dist 异常:{} {}'.format(str(ex), traceback.format_exc()))
def save_tns(self, tns_data):
"""
保存多空事务记录=》csv文件,便于后续分析
:param tns_data: {"datetime":xxx, "direction":"long"或者"short", "price":xxx}
:return:
"""
if self.backtesting:
save_path = self.cta_engine.get_logs_path()
else:
save_path = self.cta_engine.get_data_path()
try:
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_tns.csv'))
append_data(file_name=file_name, dict_data=tns_data)
except Exception as ex:
self.write_error(u'save_tns 异常:{} {}'.format(str(ex), traceback.format_exc()))
def send_wechat(self, msg: str):
"""实盘时才发送微信"""
if self.backtesting:
return
self.cta_engine.send_wechat(msg=msg, strategy=self)
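# --- Illustrative sketch (not part of the original vnpy file) ----------------
# save_klines_to_cache()/load_klines_from_cache() above persist self.klines as a
# bz2-compressed pickle named "<strategy_name>_klines.pkb2" under the engine's
# data path. The same round trip, reproduced standalone (the file name and the
# payload below are made up for this sketch):
def _demo_kline_cache_roundtrip(cache_file: str = "demo_klines.pkb2") -> dict:
    payload = {"M15": {"note": "stand-in for a cached kline object"}}
    with bz2.BZ2File(cache_file, 'wb') as f:   # write side, as in save_klines_to_cache
        pickle.dump(payload, f)
    with bz2.BZ2File(cache_file, 'rb') as f:   # read side, as in load_klines_from_cache
        return pickle.load(f)
# ------------------------------------------------------------------------------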
class CtaProFutureTemplate(CtaProTemplate):
"""期货交易增强版模板"""
activate_fak = False
activate_today_lock = False
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super().__init__(cta_engine, strategy_name, vt_symbol, setting)
self.parameters.append('activate_fak')
self.parameters.append('activate_today_lock')
def update_setting(self, setting: dict):
"""更新配置参数"""
super().update_setting(setting)
# 实盘时,判断是否激活使用FAK模式
if not self.backtesting:
if self.activate_fak:
self.order_type = OrderType.FAK
def load_policy(self):
"""加载policy"""
if self.policy:
self.write_log(u'load_policy(),初始化Policy')
self.policy.load()
self.write_log(u'Policy:{}'.format(self.policy.to_json()))
def on_start(self):
"""启动策略(必须由用户继承实现)"""
self.write_log(u'启动')
self.trading = True
self.put_event()
def on_stop(self):
"""停止策略(必须由用户继承实现)"""
self.active_orders.clear()
self.pos = 0
self.entrust = 0
self.write_log(u'停止')
self.put_event()
def on_trade(self, trade: TradeData):
"""
交易更新
支持股指期货的对锁单或者解锁
:param trade:
:return:
"""
self.write_log(u'{},交易更新 =>{},\n 当前持仓:{} '
.format(self.cur_datetime,
trade.__dict__,
self.position.pos))
dist_record = dict()
if self.backtesting:
dist_record['datetime'] = trade.time
else:
dist_record['datetime'] = ' '.join([self.cur_datetime.strftime('%Y-%m-%d'), trade.time])
dist_record['volume'] = trade.volume
dist_record['price'] = trade.price
dist_record['symbol'] = trade.vt_symbol
# 处理股指锁单
if trade.exchange == Exchange.CFFEX and not self.backtesting:
if trade.direction == Direction.LONG:
if abs(self.position.short_pos) >= trade.volume:
self.position.short_pos += trade.volume
else:
self.position.long_pos += trade.volume
else:
if self.position.long_pos >= trade.volume:
self.position.long_pos -= trade.volume
else:
self.position.short_pos -= trade.volume
self.position.pos = self.position.long_pos + self.position.short_pos
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
else:
if trade.direction == Direction.LONG and trade.offset == Offset.OPEN:
dist_record['operation'] = 'buy'
self.position.open_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.SHORT and trade.offset == Offset.OPEN:
dist_record['operation'] = 'short'
self.position.open_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.LONG and trade.offset != Offset.OPEN:
dist_record['operation'] = 'cover'
self.position.close_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.SHORT and trade.offset != Offset.OPEN:
dist_record['operation'] = 'sell'
self.position.close_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
self.save_dist(dist_record)
self.pos = self.position.pos
def fix_order(self, order: OrderData):
"""修正order被拆单得情况"""
order_info = self.active_orders.get(order.vt_orderid, None)
if order_info:
volume = order_info.get('volume')
if volume != order.volume:
self.write_log(f'修正order被拆单得情况,调整{order.vt_orderid} volume:{volume}=>{order.volume}')
order_info.update({'volume': order.volume})
def on_order(self, order: OrderData):
"""报单更新"""
# 未执行的订单中,存在是异常,删除
self.write_log(u'{}报单更新 => {}'.format(self.cur_datetime, order.__dict__))
# 修正order被拆单得情况
self.fix_order(order)
if order.vt_orderid in self.active_orders:
active_order = self.active_orders[order.vt_orderid]
if order.volume == order.traded and order.status in [Status.ALLTRADED]:
self.on_order_all_traded(order)
#elif order.offset == Offset.OPEN and order.status in [Status.CANCELLED]:
# 这里 换成active_order的,因为原始order有可能被换成锁仓方式
elif active_order['offset'] == Offset.OPEN and order.status in [Status.CANCELLED]:
# 开仓委托单被撤销
self.on_order_open_canceled(order)
#elif order.offset != Offset.OPEN and order.status in [Status.CANCELLED]:
# # 这里 换成active_order的,因为原始order有可能被换成锁仓方式
elif active_order['offset'] != Offset.OPEN and order.status in [Status.CANCELLED]:
# 平仓委托单被撤销
self.on_order_close_canceled(order)
elif order.status == Status.REJECTED:
if active_order['offset'] == Offset.OPEN:
self.write_error(u'{}委托单开{}被拒,price:{},total:{},traded:{},status:{}'
.format(order.vt_symbol, order.direction, order.price, order.volume,
order.traded, order.status))
self.on_order_open_canceled(order)
else:
self.write_error(u'OnOrder({})委托单平{}被拒,price:{},total:{},traded:{},status:{}'
.format(order.vt_symbol, order.direction, order.price, order.volume,
order.traded, order.status))
self.on_order_close_canceled(order)
else:
self.write_log(u'委托单未完成,total:{},traded:{},tradeStatus:{}'
.format(order.volume, order.traded, order.status))
else:
self.write_error(u'委托单{}不在策略的未完成订单列表中:{}'.format(order.vt_orderid, self.active_orders))
def on_order_all_traded(self, order: OrderData):
"""
订单全部成交
:param order:
:return:
"""
self.write_log(u'报单更新 => 委托单全部完成:{}'.format(order.__dict__))
active_order = self.active_orders[order.vt_orderid]
# 通过vt_orderid,找到对应的网格
grid = active_order.get('grid', None)
if grid is not None:
# 移除当前委托单
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
# 网格的所有委托单已经执行完毕
if len(grid.order_ids) == 0:
grid.order_status = False
grid.traded_volume = 0
# 平仓完毕(cover, sell)
if active_order['offset'] != Offset.OPEN:
grid.open_status = False
grid.close_status = True
grid.open_time = None
self.write_log(f'{grid.direction.value}单已平仓完毕,order_price:{order.price}'
+ f',volume:{order.volume}')
self.write_log(f'移除网格:{grid.to_json()}')
self.gt.remove_grids_by_ids(direction=grid.direction, ids=[grid.id])
# 开仓完毕( buy, short)
else:
grid.open_status = True
grid.open_time = self.cur_datetime
self.write_log(f'{grid.direction.value}单已开仓完毕,order_price:{order.price}'
+ f',volume:{order.volume}')
# 网格的所有委托单部分执行完毕
else:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.volume
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
self.write_log(f'剩余委托单号:{grid.order_ids}')
self.gt.save()
else:
self.write_error(f'on_trade找不到对应grid')
# 在策略得活动订单中,移除
self.active_orders.pop(order.vt_orderid, None)
def on_order_open_canceled(self, order: OrderData):
"""
委托开仓单撤销
如果是FAK模式,重新修改价格,再提交
FAK用于实盘,需要增加涨跌停判断
:param order:
:return:
"""
self.write_log(u'报单更新 => 委托开仓 => 撤销:{}'.format(order.__dict__))
if not self.trading:
if not self.backtesting:
self.write_error(u'当前不允许交易')
return
if order.vt_orderid not in self.active_orders:
self.write_error(u'{}不在未完成的委托单中{}。'.format(order.vt_orderid, self.active_orders))
return
# 直接更新“未完成委托单”,更新volume,retry次数
old_order = self.active_orders[order.vt_orderid]
self.write_log(u'报单更新 => {} 未完成订单信息:{}'.format(order.vt_orderid, old_order))
old_order['traded'] = order.traded
order_vt_symbol = copy(old_order['vt_symbol'])
order_volume = old_order['volume'] - old_order['traded']
order_price = old_order['price']
order_type = old_order.get('order_type', OrderType.LIMIT)
order_retry = old_order.get('retry', 0)
grid = old_order.get('grid', None)
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if order_volume <= 0:
msg = u'{} {}{}需重新开仓数量为{},不再开仓' \
.format(self.strategy_name,
order.vt_orderid,
order_vt_symbol,
order_volume)
self.write_error(msg)
self.write_log(u'移除:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
return
if order_retry > 20:
# 这里超过20次尝试失败后,不再尝试,发出告警信息
msg = u'{} {}/{}手, 重试开仓次数{}>20' \
.format(self.strategy_name,
order_vt_symbol,
order_volume,
order_retry)
self.write_error(msg)
self.send_wechat(msg)
# 网格的所有委托单已经执行完毕
if len(grid.order_ids) == 0:
grid.order_status = False
self.gt.save()
self.write_log(u'网格信息更新:{}'.format(grid.__dict__))
self.write_log(u'移除:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
return
order_retry += 1
# FAK 重新开单
if old_order['direction'] == Direction.LONG and order_type == OrderType.FAK:
# 删除旧的委托记录
self.write_log(u'移除旧的委托记录:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
if order.traded > 0:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.traded
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
# 更新网格交易器
self.write_log(u'FAK模式,需要重新发送buy委托.grid:{}'.format(grid.__dict__))
# 更新委托平仓价
buy_price = max(self.cur_mi_tick.ask_price_1, self.cur_mi_tick.last_price, order_price) + self.price_tick
# 不能超过涨停价
if self.cur_mi_tick.limit_up > 0 and buy_price > self.cur_mi_tick.limit_up:
buy_price = self.cur_mi_tick.limit_up
if self.is_upper_limit(self.vt_symbol):
self.write_log(u'{}涨停,不做buy'.format(self.vt_symbol))
return
# 发送委托
vt_orderids = self.buy(price=buy_price,
volume=order_volume,
vt_symbol=self.vt_symbol,
order_type=OrderType.FAK,
order_time=self.cur_datetime,
grid=grid)
if not vt_orderids:
self.write_error(u'重新提交{} {}手开多单,价格:{},失败'.
format(self.vt_symbol, order_volume, buy_price))
return
# 更新retry的次数
for vt_orderid in vt_orderids:
info = self.active_orders.get(vt_orderid, None)
info.update({'retry': order_retry})
self.gt.save()
elif old_order['direction'] == Direction.SHORT and order_type == OrderType.FAK:
# 删除旧的委托记录
self.write_log(u'移除旧的委托记录:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
if order.traded > 0:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.traded
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
self.write_log(u'FAK模式,需要重新发送short委托.grid:{}'.format(grid.__dict__))
short_price = min(self.cur_mi_tick.bid_price_1, self.cur_mi_tick.last_price, order_price) - self.price_tick
# 不能超过跌停价
if self.cur_mi_tick.limit_down > 0 and short_price < self.cur_mi_tick.limit_down:
short_price = self.cur_mi_tick.limit_down
if self.is_lower_limit(self.vt_symbol):
self.write_log(u'{}跌停,不做short'.format(self.vt_symbol))
return
# 发送委托
vt_orderids = self.short(price=short_price,
volume=order_volume,
vt_symbol=self.vt_symbol,
order_type=OrderType.FAK,
order_time=self.cur_datetime,
grid=grid)
if not vt_orderids:
self.write_error(
u'重新提交{} {}手开空单,价格:{}, 失败'.format(self.vt_symbol, order_volume, short_price))
return
# 更新retry的次数
for vt_orderid in vt_orderids:
info = self.active_orders.get(vt_orderid, None)
info.update({'retry': order_retry})
self.gt.save()
else:
pre_status = old_order.get('status', Status.NOTTRADED)
old_order.update({'status': Status.CANCELLED})
self.write_log(u'委托单方式{},状态:{}=>{}'.format(order_type, pre_status, old_order.get('status')))
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if not grid.order_ids:
grid.order_status = False
self.gt.save()
self.active_orders.update({order.vt_orderid: old_order})
self.display_grids()
def on_order_close_canceled(self, order: OrderData):
"""委托平仓单撤销"""
self.write_log(u'报单更新 => 委托平仓 => 撤销:{}'.format(order.__dict__))
if order.vt_orderid not in self.active_orders:
self.write_error(u'{}不在未完成的委托单中:{}。'.format(order.vt_orderid, self.active_orders))
return
if not self.trading:
self.write_error(u'当前不允许交易')
return
# 直接更新“未完成委托单”,更新volume,Retry次数
old_order = self.active_orders[order.vt_orderid]
self.write_log(u'报单更新 => {} 未完成订单信息:{}'.format(order.vt_orderid, old_order))
old_order['traded'] = order.traded
# order_time = old_order['order_time']
order_vt_symbol = copy(old_order['vt_symbol'])
order_volume = old_order['volume'] - old_order['traded']
order_price = old_order['price']
order_type = old_order.get('order_type', OrderType.LIMIT)
order_retry = old_order.get('retry', 0)
grid = old_order.get('grid', None)
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if order_volume <= 0:
msg = u'{} {}{}重新平仓数量为{},不再平仓' \
.format(self.strategy_name, order.vt_orderid, order_vt_symbol, order_volume)
self.write_error(msg)
self.send_wechat(msg)
self.write_log(u'活动订单移除:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
return
if order_retry > 20:
msg = u'{} 平仓撤单 {}/{}手, 重试平仓次数{}>20' \
.format(self.strategy_name, order_vt_symbol, order_volume, order_retry)
self.write_error(msg)
self.send_wechat(msg)
if not grid.order_ids:
grid.order_status = False
self.gt.save()
self.write_log(u'更新网格=>{}'.format(grid.__dict__))
self.write_log(u'移除活动订单:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
return
order_retry += 1
if old_order['direction'] == Direction.LONG and order_type == OrderType.FAK:
self.write_log(u'移除活动订单:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
if order.traded > 0:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.traded
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
self.write_log(u'FAK模式,需要重新发送cover委托.grid:{}'.format(grid.__dict__))
# 更新委托平仓价
cover_tick = self.tick_dict.get(order_vt_symbol, self.cur_mi_tick)
cover_price = max(cover_tick.ask_price_1, cover_tick.last_price, order_price) + self.price_tick
# 不能超过涨停价
if cover_tick.limit_up > 0 and cover_price > cover_tick.limit_up:
cover_price = cover_tick.limit_up
if self.is_upper_limit(order_vt_symbol):
self.write_log(u'{}涨停,不做cover'.format(order_vt_symbol))
return
pos = self.cta_engine.get_position_holding(vt_symbol=order_vt_symbol)
if pos is None:
self.write_error(f'{self.strategy_name}无法获取{order_vt_symbol}的持仓信息,无法平仓')
return
if pos.short_pos < order_volume:
self.write_error(f'{self.strategy_name}{order_vt_symbol}的持仓空单{pos.short_pos}不满足平仓{order_volume}要求,无法平仓')
return
# 发送委托
vt_orderids = self.cover(price=cover_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=OrderType.FAK,
order_time=self.cur_datetime,
grid=grid)
if not vt_orderids:
self.write_error(u'重新提交{} {}手平空单{}失败'.format(order_vt_symbol, order_volume, cover_price))
return
for vt_orderid in vt_orderids:
info = self.active_orders.get(vt_orderid)
info.update({'retry': order_retry})
self.gt.save()
elif old_order['direction'] == Direction.SHORT and order_type == OrderType.FAK:
self.write_log(u'移除活动订单:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
if order.traded > 0:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.traded
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
self.write_log(u'FAK模式,需要重新发送sell委托.grid:{}'.format(grid.__dict__))
sell_tick = self.tick_dict.get(order_vt_symbol, self.cur_mi_tick)
sell_price = min(sell_tick.bid_price_1, sell_tick.last_price, order_price) - self.price_tick
# 不能超过跌停价
if sell_tick.limit_down > 0 and sell_price < sell_tick.limit_down:
sell_price = sell_tick.limit_down
if self.is_lower_limit(order_vt_symbol):
self.write_log(u'{}跌停,不做sell'.format(order_vt_symbol))
return
pos = self.cta_engine.get_position_holding(vt_symbol=order_vt_symbol)
if pos is None:
self.write_error(f'{self.strategy_name}无法获取{order_vt_symbol}的持仓信息,无法平仓')
return
if pos.long_pos < order_volume:
self.write_error(f'{self.strategy_name}{order_vt_symbol}的持仓多单{pos.long_pos}不满足平仓{order_volume}要求,无法平仓')
return
# 发送委托
vt_orderids = self.sell(price=sell_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=OrderType.FAK,
order_time=self.cur_datetime,
grid=grid)
if not vt_orderids:
self.write_error(u'重新提交{} {}手平多单{}失败'.format(order_vt_symbol, order_volume, sell_price))
return
for vt_orderid in vt_orderids:
info = self.active_orders.get(vt_orderid)
info.update({'retry': order_retry})
self.gt.save()
# 普通限价单委托方式
else:
pre_status = old_order.get('status', Status.NOTTRADED)
old_order.update({'status': Status.CANCELLED})
self.write_log(u'委托单状态:{}=>{}'.format(pre_status, old_order.get('status')))
if grid:
# 判断是否有部分交易
if order.traded > 0:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.traded
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if len(grid.order_ids) == 0:
grid.order_status = False
self.gt.save()
self.active_orders.update({order.vt_orderid: old_order})
self.display_grids()
def on_stop_order(self, stop_order: StopOrder):
"""
停止单更新
需要自己重载,处理各类触发、撤单等情况
"""
self.write_log(f'停止单触发:{stop_order.__dict__}')
def cancel_all_orders(self):
"""
重载撤销所有正在进行得委托
:return:
"""
self.write_log(u'撤销所有正在进行得委托')
self.tns_cancel_logic(dt=datetime.now(), force=True, reopen=False)
def tns_cancel_logic(self, dt, force=False, reopen=False):
"撤单逻辑"""
if len(self.active_orders) < 1:
self.entrust = 0
return
canceled_ids = []
for vt_orderid in list(self.active_orders.keys()):
order_info = self.active_orders[vt_orderid]
order_vt_symbol = order_info.get('vt_symbol', self.vt_symbol)
order_time = order_info['order_time']
order_volume = order_info['volume'] - order_info['traded']
# order_price = order_info['price']
# order_direction = order_info['direction']
# order_offset = order_info['offset']
order_grid = order_info['grid']
order_status = order_info.get('status', Status.NOTTRADED)
order_type = order_info.get('order_type', OrderType.LIMIT)
over_seconds = (dt - order_time).total_seconds()
# 只处理未成交的限价委托单
if order_status in [Status.NOTTRADED, Status.SUBMITTING] and (
order_type == OrderType.LIMIT or '.SPD' in order_vt_symbol):
if over_seconds > self.cancel_seconds or force: # 超过设置的时间还未成交
self.write_log(u'撤单逻辑 => 超时{}秒未成交,取消委托单:vt_orderid:{},order:{}'
.format(over_seconds, vt_orderid, order_info))
order_info.update({'status': Status.CANCELLING})
self.active_orders.update({vt_orderid: order_info})
ret = self.cancel_order(str(vt_orderid))
if not ret:
self.write_error(f'{self.strategy_name}撤单逻辑 => {order_vt_symbol}撤单失败')
#self.write_log(u'撤单逻辑 => 撤单失败,更新状态为撤单成功')
# order_info.update({'status': Status.CANCELLED})
# self.active_orders.update({vt_orderid: order_info})
# if order_grid:
# if vt_orderid in order_grid.order_ids:
# order_grid.order_ids.remove(vt_orderid)
# if len(order_grid.order_ids) == 0:
# order_grid.order_status = False
continue
# 处理状态为‘撤销’的委托单
elif order_status == Status.CANCELLED:
self.write_log(u'撤单逻辑 => 委托单{}已成功撤单,将删除未完成订单{}'.format(vt_orderid, order_info))
canceled_ids.append(vt_orderid)
if reopen:
# 撤销的委托单,属于开仓类,需要重新委托
if order_info['offset'] == Offset.OPEN:
self.write_log(u'撤单逻辑 => 重新开仓')
# 开空委托单
if order_info['direction'] == Direction.SHORT:
short_price = self.cur_mi_price - self.price_tick
if order_grid.volume != order_volume and order_volume > 0:
self.write_log(
u'网格volume:{},order_volume:{}不一致,修正'.format(order_grid.volume, order_volume))
order_grid.volume = order_volume
self.write_log(u'重新提交{}开空委托,开空价{},v:{}'.format(order_vt_symbol, short_price, order_volume))
vt_orderids = self.short(price=short_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderid:{}'.format(vt_orderids))
order_grid.snapshot.update({'open_price': short_price})
else:
self.write_error(u'撤单后,重新委托开空仓失败')
else:
buy_price = self.cur_mi_price + self.price_tick
if order_grid.volume != order_volume and order_volume > 0:
self.write_log(
u'网格volume:{},order_volume:{}不一致,修正'.format(order_grid.volume, order_volume))
order_grid.volume = order_volume
self.write_log(u'重新提交{}开多委托,开多价{},v:{}'.format(order_vt_symbol, buy_price, order_volume))
vt_orderids = self.buy(price=buy_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
order_grid.snapshot.update({'open_price': buy_price})
else:
self.write_error(u'撤单后,重新委托开多仓失败')
else:
# 属于平多委托单
if order_info['direction'] == Direction.SHORT:
sell_price = self.cur_mi_price - self.price_tick
self.write_log(u'重新提交{}平多委托,{},v:{}'.format(order_vt_symbol, sell_price, order_volume))
vt_orderids = self.sell(price=sell_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
else:
self.write_error(u'撤单后,重新委托平多仓失败')
# 属于平空委托单
else:
cover_price = self.cur_mi_price + self.price_tick
self.write_log(u'重新提交{}平空委托,委托价{},v:{}'.format(order_vt_symbol, cover_price, order_volume))
vt_orderids = self.cover(price=cover_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
else:
self.write_error(u'撤单后,重新委托平空仓失败')
else:
self.write_log(u'撤单逻辑 => 无须重新开仓')
if order_info['offset'] == Offset.OPEN \
and order_grid \
and len(order_grid.order_ids) == 0:
if order_info['traded'] == 0 and order_grid.traded_volume == 0:
self.write_log(u'撤单逻辑 => 无任何成交 => 移除委托网格{}'.format(order_grid.__dict__))
order_info['grid'] = None
self.gt.remove_grids_by_ids(direction=order_grid.direction, ids=[order_grid.id])
elif order_info['traded'] > 0:
self.write_log('撤单逻辑 => 部分开仓')
if order_grid.traded_volume < order_info['traded']:
self.write_log('撤单逻辑 => 调整网格开仓数 {} => {}'.format(order_grid.traded_volume, order_info['traded']))
order_grid.traded_volume = order_info['traded']
self.write_log(f'撤单逻辑 => 调整网格委托状态=> False, 开仓状态:True, 开仓数量:{order_grid.volume}=>{order_grid.traded_volume}')
order_grid.order_status = False
order_grid.open_status = True
order_grid.volume = order_grid.traded_volume
order_grid.traded_volume = 0
# 删除撤单的订单
for vt_orderid in canceled_ids:
self.write_log(u'撤单逻辑 => 删除未完成订单:{}'.format(vt_orderid))
self.active_orders.pop(vt_orderid, None)
if len(self.active_orders) == 0:
self.entrust = 0
def tns_close_long_pos(self, grid):
"""
事务平多单仓位
1.来源自止损止盈平仓
逻辑: 如果当前账号昨仓满足平仓数量,直接平仓,如果不满足,则创建锁仓网格.
:param 平仓网格
:return:
"""
self.write_log(u'执行事务平多仓位:{}'.format(grid.to_json()))
# 平仓网格得合约
sell_symbol = grid.snapshot.get('mi_symbol', self.vt_symbol)
# 从cta engine获取当前账号中,sell_symbol的持仓情况
grid_pos = self.cta_engine.get_position_holding(vt_symbol=sell_symbol)
if grid_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(sell_symbol))
return False
# 不需要日内锁仓,或者昨仓可以满足,发出委托卖出单
if (grid_pos.long_yd >= grid.volume > 0 and grid_pos.long_td == 0 and grid_pos.short_td == 0) \
or not self.activate_today_lock:
if self.activate_today_lock:
self.write_log(u'昨仓多单:{},没有今仓,满足条件,直接平昨仓'.format(grid_pos.long_yd))
sell_price = self.cta_engine.get_price(sell_symbol)
if sell_price is None:
self.write_error(f'暂时不能获取{sell_symbol}价格,不能平仓')
return False
# 实盘使用对价
if not self.backtesting:
sell_tick = self.cta_engine.get_tick(sell_symbol)
if sell_tick and 0 < sell_tick.bid_price_1 < sell_price:
sell_price = sell_tick.bid_price_1
# 发出平多委托
if grid.traded_volume > 0:
grid.volume -= grid.traded_volume
grid.traded_volume = 0
# 非股指,才需要检查现有仓位
if self.exchange != Exchange.CFFEX and grid_pos.long_pos < grid.volume:
self.write_error(f'账号{sell_symbol}多单持仓:{grid_pos.long_pos}不满足平仓:{grid.volume}要求:')
return False
vt_orderids = self.sell(price=sell_price,
volume=grid.volume,
vt_symbol=sell_symbol,
order_type=self.order_type,
order_time=self.cur_datetime,
lock=self.exchange==Exchange.CFFEX,
grid=grid)
if len(vt_orderids) == 0:
self.write_error(u'多单平仓委托失败')
return False
else:
self.write_log(u'多单平仓委托成功,编号:{}'.format(vt_orderids))
return True
# 当前没有昨仓,采用锁仓处理
else:
self.write_log(u'昨仓多单:{}不满足条件,创建对锁仓'.format(grid_pos.long_yd))
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = sell_symbol
dist_record['price'] = self.cur_mi_price
dist_record['volume'] = grid.volume
dist_record['operation'] = 'add short lock[long]'
self.save_dist(dist_record)
# 创建一个对锁网格
lock_grid = copy(grid)
# 网格类型, => 锁仓格
lock_grid.type = LOCK_GRID
lock_grid.id = str(uuid.uuid1())
lock_grid.direction = Direction.SHORT
lock_grid.open_status = False
lock_grid.order_status = False
lock_grid.order_ids = []
vt_orderids = self.short(self.cur_mi_price,
volume=lock_grid.volume,
vt_symbol=self.vt_symbol,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=lock_grid)
if len(vt_orderids) > 0:
# 原做多网格得类型,设置为锁仓格
grid.type = LOCK_GRID
self.write_log(u'委托创建对锁单(空单)成功,委托编号:{},{},p:{},v:{}'
.format(vt_orderids,
sell_symbol,
self.cur_mi_price,
lock_grid.volume))
lock_grid.snapshot.update({'mi_symbol': self.vt_symbol, 'open_price': self.cur_mi_price})
self.gt.up_grids.append(lock_grid)
return True
else:
self.write_error(u'未能委托对锁单(空单)')
return False
# MASKED: tns_close_short_pos function (lines 2027-2125)
def tns_open_from_lock(self, open_symbol, open_volume, grid_type, open_direction):
"""
从锁仓单中,获取已开的网格(对手仓设置为止损)
1, 检查多空锁仓单中,是否有满足数量得昨仓,
2, 定位到需求网格,
:param open_symbol: 开仓合约(主力合约)
:param open_volume:
:param grid_type 更新网格的类型
:param open_direction: 开仓方向
:return: None, 保留的格
"""
# 检查多单得对锁格
locked_long_grids = self.gt.get_opened_grids_within_types(direction=Direction.LONG, types=[LOCK_GRID])
if len(locked_long_grids) == 0:
return None
locked_long_dict = {}
for g in locked_long_grids:
symbol = g.snapshot.get('mi_symbol', self.vt_symbol)
if g.order_status or g.order_ids:
self.write_log(u'当前对锁格:{}存在委托,不纳入计算'.format(g.to_json()))
continue
if symbol != open_symbol:
self.write_log(u'不处理symbol不一致: 委托请求:{}, Grid mi Symbol:{}'.format(open_symbol, symbol))
continue
volume = g.volume - g.traded_volume
locked_long_dict.update({symbol: locked_long_dict.get(symbol, 0) + volume})
locked_long_volume = locked_long_dict.get(open_symbol, 0)
if locked_long_volume < open_volume:
self.write_log(u'锁单中,没有足够得多单:{},需求:{}'.format(locked_long_volume, open_volume))
return None
# 空单对锁格
locked_short_grids = self.gt.get_opened_grids_within_types(direction=Direction.SHORT, types=[LOCK_GRID])
if len(locked_short_grids) == 0:
return None
locked_short_dict = {}
for g in locked_short_grids:
symbol = g.snapshot.get('mi_symbol', self.vt_symbol)
if g.order_status or g.order_ids:
self.write_log(u'当前对锁格:{}存在委托,不进行解锁'.format(g.to_json()))
continue
if symbol != open_symbol:
self.write_log(u'不处理symbol不一致: 委托请求:{}, Grid mi Symbol:{}'.format(open_symbol, symbol))
continue
volume = g.volume - g.traded_volume
locked_short_dict.update({symbol: locked_short_dict.get(symbol, 0) + volume})
locked_short_volume = locked_short_dict.get(open_symbol, 0)
if locked_short_volume < open_volume:
self.write_log(u'锁单中,没有足够得空单:{},需求:{}'.format(locked_short_volume, open_volume))
return None
# 检查空单昨仓是否满足
symbol_pos = self.cta_engine.get_position_holding(open_symbol)
if (open_direction == Direction.LONG and symbol_pos.short_yd < open_volume) \
or (open_direction == Direction.SHORT and symbol_pos.long_yd < open_volume):
self.write_log(u'昨仓数量,多单:{},空单:{},不满足:{}'
.format(symbol_pos.long_yd, symbol_pos.short_yd, open_volume))
return None
# 合并/抽离出 满足open_volume得多格,
target_long_grid = None
remove_long_grid_ids = []
for g in sorted(locked_long_grids, key=lambda grid: grid.volume):
if g.order_status or len(g.order_ids) > 0:
continue
if target_long_grid is None:
target_long_grid = g
if g.volume == open_volume:
self.write_log(u'第一个网格持仓数量一致:g.volume:{},open_volume:{}'
.format(g.volume, open_volume))
break
elif g.volume > open_volume:
self.write_log(u'第一个网格持仓数量大于需求:g.volume:{},open_volume:{}'
.format(g.volume, open_volume))
remain_grid = copy(g)
g.volume = open_volume
remain_grid.volume -= open_volume
remain_grid.id = str(uuid.uuid1())
self.gt.dn_grids.append(remain_grid)
self.write_log(u'添加剩余仓位到新多单网格:g.volume:{}'
.format(remain_grid.volume))
break
else:
if g.volume <= open_volume - target_long_grid.volume:
self.write_log(u'网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_long_grid.volume))
target_long_grid.volume += g.volume
g.volume = 0
self.write_log(u'计划移除:{}'.format(g.id))
remove_long_grid_ids.append(g.id)
else:
self.write_log(u'转移前网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_long_grid.volume))
g.volume -= (open_volume - target_long_grid.volume)
target_long_grid.volume = open_volume
self.write_log(u'转移后网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_long_grid.volume))
break
target_short_grid = None
remove_short_grid_ids = []
for g in sorted(locked_short_grids, key=lambda grid: grid.volume):
if g.order_status or g.order_ids:
continue
if target_short_grid is None:
target_short_grid = g
if g.volume == open_volume:
self.write_log(u'第一个空单网格持仓数量满足需求:g.volume:{},open_volume:{}'
.format(g.volume, open_volume))
break
elif g.volume > open_volume:
self.write_log(u'第一个空单网格持仓数量大于需求:g.volume:{},open_volume:{}'
.format(g.volume, open_volume))
remain_grid = copy(g)
g.volume = open_volume
remain_grid.volume -= open_volume
remain_grid.id = str(uuid.uuid1())
self.gt.up_grids.append(remain_grid)
self.write_log(u'添加剩余仓位到新空单网格:g.volume:{}'
.format(remain_grid.volume))
break
else:
if g.volume <= open_volume - target_short_grid.volume:
target_short_grid.volume += g.volume
g.volume = 0
remove_short_grid_ids.append(g.id)
else:
self.write_log(u'转移前空单网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_short_grid.volume))
g.volume -= (open_volume - target_short_grid.volume)
target_short_grid.volume = open_volume
self.write_log(u'转移后空单网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_short_grid.volume))
break
        if target_long_grid is None or target_short_grid is None:
self.write_log(u'未能定位多单网格和空单网格,不能解锁')
return None
# 移除volume为0的网格
self.gt.remove_grids_by_ids(direction=Direction.LONG, ids=remove_long_grid_ids)
self.gt.remove_grids_by_ids(direction=Direction.SHORT, ids=remove_short_grid_ids)
if open_direction == Direction.LONG:
self.write_log(u'保留多单,对空单:{}平仓'.format(target_short_grid.id))
# 对空单目标网格进行平仓
cover_price = self.cta_engine.get_price(open_symbol)
# 使用止损价作为平仓
self.write_log(u'空单止损价 :{} =>{}'.format(target_short_grid.stop_price, cover_price - 10 * self.price_tick))
target_short_grid.stop_price = cover_price - 10 * self.price_tick
# 更新对锁格类型=>指定类型
self.write_log(u'空单类型 :{} =>{}'.format(target_short_grid.type, grid_type))
target_short_grid.type = grid_type
# 返回保留的多单网格
return target_long_grid
else:
self.write_log(u'保留空单,对多单平仓')
sell_price = self.cta_engine.get_price(open_symbol)
            # 使用止损价作为平仓价
            self.write_log(u'多单止损价 :{} =>{}'.format(target_long_grid.stop_price, sell_price + 10 * self.price_tick))
            target_long_grid.stop_price = sell_price + 10 * self.price_tick
            # 更新对锁格类型=>指定类型
            self.write_log(u'多单类型 :{} =>{}'.format(target_long_grid.type, grid_type))
target_long_grid.type = grid_type
# 返回保留的空单网格
return target_short_grid
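    # A minimal usage sketch (hypothetical caller, not part of this template): a
    # subclass that wants to open a position out of an existing lock pair can try
    # the unlock path first and only fall back to a fresh open order if it fails.
    #
    #     grid = self.tns_open_from_lock(open_symbol=self.vt_symbol,
    #                                    open_volume=1,
    #                                    grid_type='',          # strategy-specific type
    #                                    open_direction=Direction.LONG)
    #     if grid is None:
    #         # not enough locked volume / yesterday position: open normally instead
    #         ...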
def tns_close_locked_grids(self, grid_type):
"""
事务对所有对锁网格进行平仓
:return:
"""
# 正在委托时,不处理
if self.entrust != 0:
return
if not self.activate_today_lock:
return
# 多单得对锁格
locked_long_grids = self.gt.get_opened_grids_within_types(direction=Direction.LONG, types=[LOCK_GRID])
if len(locked_long_grids) == 0:
return
locked_long_dict = {}
for g in locked_long_grids:
vt_symbol = g.snapshot.get('mi_symbol', self.vt_symbol)
volume = g.volume - g.traded_volume
locked_long_dict.update({vt_symbol: locked_long_dict.get(vt_symbol, 0) + volume})
if g.order_status or g.order_ids:
self.write_log(u'当前对锁格:{}存在委托,不进行解锁'.format(g.to_json()))
return
locked_long_volume = sum(locked_long_dict.values(), 0)
# 空单对锁格
locked_short_grids = self.gt.get_opened_grids_within_types(direction=Direction.SHORT, types=[LOCK_GRID])
if len(locked_short_grids) == 0:
return
locked_short_dict = {}
for g in locked_short_grids:
vt_symbol = g.snapshot.get('mi_symbol', self.vt_symbol)
volume = g.volume - g.traded_volume
locked_short_dict.update({vt_symbol: locked_short_dict.get(vt_symbol, 0) + volume})
if g.order_status or g.order_ids:
self.write_log(u'当前对锁格:{}存在委托,不进行解锁'.format(g.to_json()))
return
locked_short_volume = sum(locked_short_dict.values(), 0)
# debug info
self.write_log(u'多单对锁格:{}'.format([g.to_json() for g in locked_long_grids]))
self.write_log(u'空单对锁格:{}'.format([g.to_json() for g in locked_short_grids]))
if locked_long_volume != locked_short_volume:
self.write_error(u'{}对锁格多空数量不一致,不能解锁.\n多:{},\n空:{}'
.format(self.strategy_name, locked_long_volume, locked_short_volume))
return
# 检查所有品种得昨仓是否满足数量
for vt_symbol, volume in locked_long_dict.items():
pos = self.cta_engine.get_position_holding(vt_symbol, None)
if pos is None:
                self.write_error(u'{} 没有获取{}的持仓信息,不能解锁'.format(self.strategy_name, vt_symbol))
return
# 检查多空单得昨单能否满足
if pos.long_yd < volume or pos.short_yd < volume:
                self.write_error(u'{}持仓昨仓多单:{},空单:{},不满足解锁数量:{}'
                                 .format(vt_symbol, pos.long_yd, pos.short_yd, volume))
return
if pos.long_td > 0 or pos.short_td > 0:
self.write_log(u'{}存在今多仓:{},空仓{},不满足解锁条件'.format(vt_symbol, pos.long_td, pos.short_td))
return
price = self.cta_engine.get_price(vt_symbol)
            if price is None:
                self.write_error(u'{}价格不在tick_dict缓存中,不能解锁'.format(vt_symbol))
                return
# 所有合约价格和仓位都满足同时解开
for g in locked_long_grids:
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = self.vt_symbol
dist_record['price'] = self.cur_mi_price
dist_record['volume'] = g.volume
dist_record['operation'] = 'close lock[long]'
self.save_dist(dist_record)
# 通过切换回普通网格,提升止损价的方式实现平仓
self.write_log(
u'网格 从锁仓 {}=>{},提升止损价{}=>{}进行离场'.format(LOCK_GRID, grid_type, g.stop_price,
self.cur_99_price / 2))
g.type = grid_type
g.stop_price = self.cur_99_price / 2
for g in locked_short_grids:
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = self.vt_symbol
dist_record['price'] = self.cur_mi_price
dist_record['volume'] = g.volume
dist_record['operation'] = 'close lock[short]'
self.save_dist(dist_record)
# 通过切换回普通网格,提升止损价的方式实现平仓
self.write_log(u'网格 从锁仓 {}=>{},提升止损价{}=>{}进行离场'.format(LOCK_GRID, grid_type, g.stop_price,
self.cur_99_price * 2))
g.type = grid_type
g.stop_price = self.cur_99_price * 2
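    # Unlock preconditions enforced above, summarised as a hypothetical helper (the
    # field names follow the holding object returned by get_position_holding):
    #
    #     def _can_unlock(self, pos, volume):
    #         return (pos.long_yd >= volume and pos.short_yd >= volume
    #                 and pos.long_td == 0 and pos.short_td == 0)
    #
    # Only when every involved symbol passes this check (and has a cached price) are
    # the lock grids switched back to `grid_type` with adjusted stop prices, leaving
    # the actual exit to the normal per-grid handling.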
def grid_check_stop(self):
"""
网格逐一止损/止盈检查 (根据指数价格进行止损止盈)
:return:
"""
if self.entrust != 0:
return
if not self.trading:
if not self.backtesting:
self.write_error(u'当前不允许交易')
return
# 多单网格逐一止损/止盈检查:
long_grids = self.gt.get_opened_grids_without_types(direction=Direction.LONG, types=[LOCK_GRID])
for g in long_grids:
# 满足离场条件,或者碰到止损价格
if g.stop_price > 0 and g.stop_price > self.cur_99_price \
and g.open_status and not g.order_status:
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = self.idx_symbol
dist_record['volume'] = g.volume
dist_record['price'] = self.cur_99_price
dist_record['operation'] = 'stop leave'
dist_record['signals'] = '{}<{}'.format(self.cur_99_price, g.stop_price)
# 止损离场
self.write_log(u'{} 指数价:{} 触发多单止损线{},{}当前价:{}。指数开仓价:{},主力开仓价:{},v:{}'.
format(self.cur_datetime, self.cur_99_price, g.stop_price, self.vt_symbol,
self.cur_mi_price,
g.open_price, g.snapshot.get('open_price'), g.volume))
self.save_dist(dist_record)
if self.tns_close_long_pos(g):
self.write_log(u'多单止盈/止损委托成功')
else:
self.write_error(u'多单止损委托失败')
# 空单网格止损检查
short_grids = self.gt.get_opened_grids_without_types(direction=Direction.SHORT, types=[LOCK_GRID])
for g in short_grids:
if g.stop_price > 0 and g.stop_price < self.cur_99_price \
and g.open_status and not g.order_status:
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = self.idx_symbol
dist_record['volume'] = g.volume
dist_record['price'] = self.cur_99_price
dist_record['operation'] = 'stop leave'
                dist_record['signals'] = '{}>{}'.format(self.cur_99_price, g.stop_price)
# 网格止损
self.write_log(u'{} 指数价:{} 触发空单止损线:{},{}最新价:{}。指数开仓价:{},主力开仓价:{},v:{}'.
format(self.cur_datetime, self.cur_99_price, g.stop_price, self.vt_symbol,
self.cur_mi_price,
g.open_price, g.snapshot.get('open_price'), g.volume))
self.save_dist(dist_record)
if self.tns_close_short_pos(g):
self.write_log(u'空单止盈/止损委托成功')
else:
self.write_error(u'委托空单平仓失败')
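    # grid_check_stop() is meant to be driven by a subclass's market-data callbacks;
    # a minimal, hypothetical wiring could look like this (field updates mirror the
    # variables used above):
    #
    #     def on_tick(self, tick: TickData):
    #         self.cur_datetime = tick.datetime
    #         if tick.vt_symbol == self.idx_symbol:
    #             self.cur_99_price = tick.last_price
    #         else:
    #             self.cur_mi_price = tick.last_price
    #         if self.inited and self.trading:
    #             self.grid_check_stop()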
def tns_close_short_pos(self, grid):
"""
事务平空单仓位
1.来源自止损止盈平仓
2.来源自换仓
逻辑: 如果当前账号昨仓满足平仓数量,直接平仓,如果不满足,则创建锁仓网格.
        :param grid: 平仓网格
:return:
"""
self.write_log(u'执行事务平空仓位:{}'.format(grid.to_json()))
# 平仓网格得合约
cover_symbol = grid.snapshot.get('mi_symbol', self.vt_symbol)
# vt_symbol => holding position
grid_pos = self.cta_engine.get_position_holding(cover_symbol)
if grid_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(cover_symbol))
return False
# 昨仓可以满足,发出委托卖出单
if (grid_pos.short_yd >= grid.volume > 0 and grid_pos.long_td == 0 and grid_pos.short_td == 0) \
or not self.activate_today_lock:
if self.activate_today_lock:
self.write_log(u'昨仓空单:{},没有今仓, 满足条件,直接平昨仓'.format(grid_pos.short_yd))
cover_price = self.cta_engine.get_price(cover_symbol)
if cover_price is None:
self.write_error(f'暂时没有{cover_symbol}行情,不能执行平仓')
return False
# 实盘使用对价
if not self.backtesting:
cover_tick = self.cta_engine.get_tick(cover_symbol)
                if cover_tick and 0 < cover_price < cover_tick.ask_price_1:
cover_price = cover_tick.ask_price_1
# 发出cover委托
if grid.traded_volume > 0:
grid.volume -= grid.traded_volume
grid.traded_volume = 0
# 非股指,需要检查是否有持仓
if self.exchange!=Exchange.CFFEX and grid_pos.short_pos < grid.volume:
self.write_error(f'账号{cover_symbol}空单持仓:{grid_pos.short_pos}不满足平仓:{grid.volume}要求:')
return False
vt_orderids = self.cover(price=cover_price,
volume=grid.volume,
vt_symbol=cover_symbol,
order_type=self.order_type,
order_time=self.cur_datetime,
lock=self.exchange==Exchange.CFFEX,
grid=grid)
if len(vt_orderids) == 0:
self.write_error(u'空单平仓委托失败')
return False
else:
self.write_log(u'空单平仓委托成功,编号:{}'.format(vt_orderids))
return True
# 当前没有昨仓,采用锁仓处理
else:
self.write_log(u'昨仓空单:{}不满足条件,建立对锁仓'.format(grid_pos.short_yd))
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = cover_symbol
dist_record['price'] = self.cur_mi_price
dist_record['volume'] = grid.volume
dist_record['operation'] = 'add long lock[short]'
self.save_dist(dist_record)
# 创建一个对锁网格
lock_grid = copy(grid)
# 网格类型, => 锁仓格
lock_grid.type = LOCK_GRID
lock_grid.id = str(uuid.uuid1())
lock_grid.direction = Direction.LONG
lock_grid.open_status = False
lock_grid.order_status = False
lock_grid.order_ids = []
            vt_orderids = self.buy(price=self.cur_mi_price,
                                   volume=lock_grid.volume,
                                   vt_symbol=cover_symbol,
                                   order_type=self.order_type,
                                   order_time=self.cur_datetime,
                                   grid=lock_grid)
if len(vt_orderids) > 0:
# 原做空网格得类型,设置为锁仓格
grid.type = LOCK_GRID
self.write_log(u'委托创建对锁单(多单)成功,委托编号:{},{},p:{},v:{}'
.format(vt_orderids,
self.vt_symbol,
self.cur_mi_price,
lock_grid.volume))
lock_grid.snapshot.update({'mi_symbol': self.vt_symbol, 'open_price': self.cur_mi_price})
self.gt.dn_grids.append(lock_grid)
return True
else:
self.write_error(u'未能委托对锁单(多单)')
return False
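    # Note on the lock= argument used in the cover()/sell() calls of these two
    # transactions: for CFFEX contracts the close is sent with lock=True so the
    # engine can fall back to lock mode rather than closing outright (commonly done
    # because closing today's CFFEX positions is expensive); for other exchanges the
    # template checks the account's short_pos / yesterday volumes itself before
    # sending the order.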
""""""
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderid:{}'.format(vt_orderids))
order_grid.snapshot.update({'open_price': short_price})
else:
self.write_error(u'撤单后,重新委托开空仓失败')
else:
buy_price = self.cur_mi_price + self.price_tick
if order_grid.volume != order_volume and order_volume > 0:
self.write_log(
u'网格volume:{},order_volume:{}不一致,修正'.format(order_grid.volume, order_volume))
order_grid.volume = order_volume
self.write_log(u'重新提交{}开多委托,开多价{},v:{}'.format(order_vt_symbol, buy_price, order_volume))
vt_orderids = self.buy(price=buy_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
order_grid.snapshot.update({'open_price': buy_price})
else:
self.write_error(u'撤单后,重新委托开多仓失败')
else:
# 属于平多委托单
if order_info['direction'] == Direction.SHORT:
sell_price = self.cur_mi_price - self.price_tick
self.write_log(u'重新提交{}平多委托,{},v:{}'.format(order_vt_symbol, sell_price, order_volume))
vt_orderids = self.sell(price=sell_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
else:
self.write_error(u'撤单后,重新委托平多仓失败')
# 属于平空委托单
else:
cover_price = self.cur_mi_price + self.price_tick
self.write_log(u'重新提交{}平空委托,委托价{},v:{}'.format(order_vt_symbol, cover_price, order_volume))
vt_orderids = self.cover(price=cover_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
else:
self.write_error(u'撤单后,重新委托平空仓失败')
else:
self.write_log(u'撤单逻辑 => 无须重新开仓')
if order_info['offset'] == Offset.OPEN \
and order_grid \
and len(order_grid.order_ids) == 0:
if order_info['traded'] == 0 and order_grid.traded_volume == 0:
self.write_log(u'撤单逻辑 => 无任何成交 => 移除委托网格{}'.format(order_grid.__dict__))
order_info['grid'] = None
self.gt.remove_grids_by_ids(direction=order_grid.direction, ids=[order_grid.id])
elif order_info['traded'] > 0:
                        self.write_log('撤单逻辑 => 部分开仓')
if order_grid.traded_volume < order_info['traded']:
                            self.write_log('撤单逻辑 => 调整网格开仓数 {} => {}'.format(order_grid.traded_volume, order_info['traded']))
order_grid.traded_volume = order_info['traded']
self.write_log(f'撤单逻辑 => 调整网格委托状态=> False, 开仓状态:True, 开仓数量:{order_grid.volume}=>{order_grid.traded_volume}')
order_grid.order_status = False
order_grid.open_status = True
order_grid.volume = order_grid.traded_volume
order_grid.traded_volume = 0
# 删除撤单的订单
for vt_orderid in canceled_ids:
self.write_log(u'撤单逻辑 => 删除未完成订单:{}'.format(vt_orderid))
self.active_orders.pop(vt_orderid, None)
if len(self.active_orders) == 0:
self.entrust = 0
def tns_close_long_pos(self, grid):
"""
事务平多单仓位
1.来源自止损止盈平仓
逻辑: 如果当前账号昨仓满足平仓数量,直接平仓,如果不满足,则创建锁仓网格.
:param 平仓网格
:return:
"""
self.write_log(u'执行事务平多仓位:{}'.format(grid.to_json()))
# 平仓网格得合约
sell_symbol = grid.snapshot.get('mi_symbol', self.vt_symbol)
# 从cta engine获取当前账号中,sell_symbol的持仓情况
grid_pos = self.cta_engine.get_position_holding(vt_symbol=sell_symbol)
if grid_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(sell_symbol))
return False
# 不需要日内锁仓,或者昨仓可以满足,发出委托卖出单
if (grid_pos.long_yd >= grid.volume > 0 and grid_pos.long_td == 0 and grid_pos.short_td == 0) \
or not self.activate_today_lock:
if self.activate_today_lock:
self.write_log(u'昨仓多单:{},没有今仓,满足条件,直接平昨仓'.format(grid_pos.long_yd))
sell_price = self.cta_engine.get_price(sell_symbol)
if sell_price is None:
self.write_error(f'暂时不能获取{sell_symbol}价格,不能平仓')
return False
# 实盘使用对价
if not self.backtesting:
sell_tick = self.cta_engine.get_tick(sell_symbol)
if sell_tick and 0 < sell_tick.bid_price_1 < sell_price:
sell_price = sell_tick.bid_price_1
# 发出平多委托
if grid.traded_volume > 0:
grid.volume -= grid.traded_volume
grid.traded_volume = 0
# 非股指,才需要检查现有仓位
            if self.exchange != Exchange.CFFEX and grid_pos.long_pos < grid.volume:
                self.write_error(f'账号{sell_symbol}多单持仓:{grid_pos.long_pos}不满足平仓:{grid.volume}要求')
return False
vt_orderids = self.sell(price=sell_price,
volume=grid.volume,
vt_symbol=sell_symbol,
order_type=self.order_type,
order_time=self.cur_datetime,
lock=self.exchange==Exchange.CFFEX,
grid=grid)
if len(vt_orderids) == 0:
self.write_error(u'多单平仓委托失败')
return False
else:
self.write_log(u'多单平仓委托成功,编号:{}'.format(vt_orderids))
return True
# 当前没有昨仓,采用锁仓处理
else:
self.write_log(u'昨仓多单:{}不满足条件,创建对锁仓'.format(grid_pos.long_yd))
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = sell_symbol
dist_record['price'] = self.cur_mi_price
dist_record['volume'] = grid.volume
dist_record['operation'] = 'add short lock[long]'
self.save_dist(dist_record)
# 创建一个对锁网格
lock_grid = copy(grid)
# 网格类型, => 锁仓格
lock_grid.type = LOCK_GRID
lock_grid.id = str(uuid.uuid1())
lock_grid.direction = Direction.SHORT
lock_grid.open_status = False
lock_grid.order_status = False
lock_grid.order_ids = []
vt_orderids = self.short(self.cur_mi_price,
volume=lock_grid.volume,
vt_symbol=self.vt_symbol,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=lock_grid)
if len(vt_orderids) > 0:
# 原做多网格得类型,设置为锁仓格
grid.type = LOCK_GRID
self.write_log(u'委托创建对锁单(空单)成功,委托编号:{},{},p:{},v:{}'
.format(vt_orderids,
sell_symbol,
self.cur_mi_price,
lock_grid.volume))
lock_grid.snapshot.update({'mi_symbol': self.vt_symbol, 'open_price': self.cur_mi_price})
self.gt.up_grids.append(lock_grid)
return True
else:
self.write_error(u'未能委托对锁单(空单)')
return False
def tns_close_short_pos(self, grid):
"""
事务平空单仓位
1.来源自止损止盈平仓
2.来源自换仓
逻辑: 如果当前账号昨仓满足平仓数量,直接平仓,如果不满足,则创建锁仓网格.
:param 平仓网格
:return:
"""
self.write_log(u'执行事务平空仓位:{}'.format(grid.to_json()))
# 平仓网格得合约
cover_symbol = grid.snapshot.get('mi_symbol', self.vt_symbol)
# vt_symbol => holding position
grid_pos = self.cta_engine.get_position_holding(cover_symbol)
if grid_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(cover_symbol))
return False
# 昨仓可以满足,发出委托卖出单
if (grid_pos.short_yd >= grid.volume > 0 and grid_pos.long_td == 0 and grid_pos.short_td == 0) \
or not self.activate_today_lock:
if self.activate_today_lock:
self.write_log(u'昨仓空单:{},没有今仓, 满足条件,直接平昨仓'.format(grid_pos.short_yd))
cover_price = self.cta_engine.get_price(cover_symbol)
if cover_price is None:
self.write_error(f'暂时没有{cover_symbol}行情,不能执行平仓')
return False
# 实盘使用对价
if not self.backtesting:
cover_tick = self.cta_engine.get_tick(cover_symbol)
                if cover_tick and 0 < cover_price < cover_tick.ask_price_1:
cover_price = cover_tick.ask_price_1
# 发出cover委托
if grid.traded_volume > 0:
grid.volume -= grid.traded_volume
grid.traded_volume = 0
# 非股指,需要检查是否有持仓
            if self.exchange != Exchange.CFFEX and grid_pos.short_pos < grid.volume:
                self.write_error(f'账号{cover_symbol}空单持仓:{grid_pos.short_pos}不满足平仓:{grid.volume}要求')
return False
vt_orderids = self.cover(price=cover_price,
volume=grid.volume,
vt_symbol=cover_symbol,
order_type=self.order_type,
order_time=self.cur_datetime,
lock=self.exchange==Exchange.CFFEX,
grid=grid)
if len(vt_orderids) == 0:
self.write_error(u'空单平仓委托失败')
return False
else:
self.write_log(u'空单平仓委托成功,编号:{}'.format(vt_orderids))
return True
# 当前没有昨仓,采用锁仓处理
else:
self.write_log(u'昨仓空单:{}不满足条件,建立对锁仓'.format(grid_pos.short_yd))
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = cover_symbol
dist_record['price'] = self.cur_mi_price
dist_record['volume'] = grid.volume
dist_record['operation'] = 'add long lock[short]'
self.save_dist(dist_record)
# 创建一个对锁网格
lock_grid = copy(grid)
# 网格类型, => 锁仓格
lock_grid.type = LOCK_GRID
lock_grid.id = str(uuid.uuid1())
lock_grid.direction = Direction.LONG
lock_grid.open_status = False
lock_grid.order_status = False
lock_grid.order_ids = []
vt_orderids = self.buy(price=self.cur_mi_price,
volume=lock_grid.volume,
vt_symbol=cover_symbol,
order_type=self.order_type,
grid=lock_grid)
if len(vt_orderids) > 0:
# 原做空网格得类型,设置为锁仓格
grid.type = LOCK_GRID
self.write_log(u'委托创建对锁单(多单)成功,委托编号:{},{},p:{},v:{}'
.format(vt_orderids,
self.vt_symbol,
self.cur_mi_price,
lock_grid.volume))
lock_grid.snapshot.update({'mi_symbol': self.vt_symbol, 'open_price': self.cur_mi_price})
self.gt.dn_grids.append(lock_grid)
return True
else:
self.write_error(u'未能委托对锁单(多单)')
return False
def tns_open_from_lock(self, open_symbol, open_volume, grid_type, open_direction):
"""
从锁仓单中,获取已开的网格(对手仓设置为止损)
1, 检查多空锁仓单中,是否有满足数量得昨仓,
2, 定位到需求网格,
:param open_symbol: 开仓合约(主力合约)
:param open_volume:
:param grid_type 更新网格的类型
:param open_direction: 开仓方向
:return: None, 保留的格
"""
# 检查多单得对锁格
locked_long_grids = self.gt.get_opened_grids_within_types(direction=Direction.LONG, types=[LOCK_GRID])
if len(locked_long_grids) == 0:
return None
locked_long_dict = {}
for g in locked_long_grids:
symbol = g.snapshot.get('mi_symbol', self.vt_symbol)
if g.order_status or g.order_ids:
self.write_log(u'当前对锁格:{}存在委托,不纳入计算'.format(g.to_json()))
continue
if symbol != open_symbol:
self.write_log(u'不处理symbol不一致: 委托请求:{}, Grid mi Symbol:{}'.format(open_symbol, symbol))
continue
volume = g.volume - g.traded_volume
locked_long_dict.update({symbol: locked_long_dict.get(symbol, 0) + volume})
locked_long_volume = locked_long_dict.get(open_symbol, 0)
if locked_long_volume < open_volume:
self.write_log(u'锁单中,没有足够得多单:{},需求:{}'.format(locked_long_volume, open_volume))
return None
# 空单对锁格
locked_short_grids = self.gt.get_opened_grids_within_types(direction=Direction.SHORT, types=[LOCK_GRID])
if len(locked_short_grids) == 0:
return None
locked_short_dict = {}
for g in locked_short_grids:
symbol = g.snapshot.get('mi_symbol', self.vt_symbol)
if g.order_status or g.order_ids:
self.write_log(u'当前对锁格:{}存在委托,不进行解锁'.format(g.to_json()))
continue
if symbol != open_symbol:
self.write_log(u'不处理symbol不一致: 委托请求:{}, Grid mi Symbol:{}'.format(open_symbol, symbol))
continue
volume = g.volume - g.traded_volume
locked_short_dict.update({symbol: locked_short_dict.get(symbol, 0) + volume})
locked_short_volume = locked_short_dict.get(open_symbol, 0)
if locked_short_volume < open_volume:
self.write_log(u'锁单中,没有足够得空单:{},需求:{}'.format(locked_short_volume, open_volume))
return None
# 检查空单昨仓是否满足
symbol_pos = self.cta_engine.get_position_holding(open_symbol)
if (open_direction == Direction.LONG and symbol_pos.short_yd < open_volume) \
or (open_direction == Direction.SHORT and symbol_pos.long_yd < open_volume):
self.write_log(u'昨仓数量,多单:{},空单:{},不满足:{}'
.format(symbol_pos.long_yd, symbol_pos.short_yd, open_volume))
return None
# 合并/抽离出 满足open_volume得多格,
target_long_grid = None
remove_long_grid_ids = []
for g in sorted(locked_long_grids, key=lambda grid: grid.volume):
if g.order_status or len(g.order_ids) > 0:
continue
if target_long_grid is None:
target_long_grid = g
if g.volume == open_volume:
self.write_log(u'第一个网格持仓数量一致:g.volume:{},open_volume:{}'
.format(g.volume, open_volume))
break
elif g.volume > open_volume:
self.write_log(u'第一个网格持仓数量大于需求:g.volume:{},open_volume:{}'
.format(g.volume, open_volume))
remain_grid = copy(g)
g.volume = open_volume
remain_grid.volume -= open_volume
remain_grid.id = str(uuid.uuid1())
self.gt.dn_grids.append(remain_grid)
self.write_log(u'添加剩余仓位到新多单网格:g.volume:{}'
.format(remain_grid.volume))
break
else:
if g.volume <= open_volume - target_long_grid.volume:
self.write_log(u'网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_long_grid.volume))
target_long_grid.volume += g.volume
g.volume = 0
self.write_log(u'计划移除:{}'.format(g.id))
remove_long_grid_ids.append(g.id)
else:
self.write_log(u'转移前网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_long_grid.volume))
g.volume -= (open_volume - target_long_grid.volume)
target_long_grid.volume = open_volume
self.write_log(u'转移后网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_long_grid.volume))
break
target_short_grid = None
remove_short_grid_ids = []
for g in sorted(locked_short_grids, key=lambda grid: grid.volume):
if g.order_status or g.order_ids:
continue
if target_short_grid is None:
target_short_grid = g
if g.volume == open_volume:
self.write_log(u'第一个空单网格持仓数量满足需求:g.volume:{},open_volume:{}'
.format(g.volume, open_volume))
break
elif g.volume > open_volume:
self.write_log(u'第一个空单网格持仓数量大于需求:g.volume:{},open_volume:{}'
.format(g.volume, open_volume))
remain_grid = copy(g)
g.volume = open_volume
remain_grid.volume -= open_volume
remain_grid.id = str(uuid.uuid1())
self.gt.up_grids.append(remain_grid)
self.write_log(u'添加剩余仓位到新空单网格:g.volume:{}'
.format(remain_grid.volume))
break
else:
if g.volume <= open_volume - target_short_grid.volume:
target_short_grid.volume += g.volume
g.volume = 0
remove_short_grid_ids.append(g.id)
else:
self.write_log(u'转移前空单网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_short_grid.volume))
g.volume -= (open_volume - target_short_grid.volume)
target_short_grid.volume = open_volume
self.write_log(u'转移后空单网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_short_grid.volume))
break
        if target_long_grid is None or target_short_grid is None:
self.write_log(u'未能定位多单网格和空单网格,不能解锁')
return None
# 移除volume为0的网格
self.gt.remove_grids_by_ids(direction=Direction.LONG, ids=remove_long_grid_ids)
self.gt.remove_grids_by_ids(direction=Direction.SHORT, ids=remove_short_grid_ids)
if open_direction == Direction.LONG:
self.write_log(u'保留多单,对空单:{}平仓'.format(target_short_grid.id))
# 对空单目标网格进行平仓
cover_price = self.cta_engine.get_price(open_symbol)
# 使用止损价作为平仓
self.write_log(u'空单止损价 :{} =>{}'.format(target_short_grid.stop_price, cover_price - 10 * self.price_tick))
target_short_grid.stop_price = cover_price - 10 * self.price_tick
# 更新对锁格类型=>指定类型
self.write_log(u'空单类型 :{} =>{}'.format(target_short_grid.type, grid_type))
target_short_grid.type = grid_type
# 返回保留的多单网格
return target_long_grid
else:
self.write_log(u'保留空单,对多单平仓')
sell_price = self.cta_engine.get_price(open_symbol)
            # 使用止损价作为平仓
            self.write_log(u'多单止损价 :{} =>{}'.format(target_long_grid.stop_price, sell_price + 10 * self.price_tick))
target_long_grid.stop_price = sell_price + 10 * self.price_tick
# 更新对锁格类型=>指定类型
            self.write_log(u'多单类型 :{} =>{}'.format(target_long_grid.type, grid_type))
target_long_grid.type = grid_type
# 返回保留的空单网格
return target_short_grid
def tns_close_locked_grids(self, grid_type):
"""
事务对所有对锁网格进行平仓
:return:
"""
# 正在委托时,不处理
if self.entrust != 0:
return
if not self.activate_today_lock:
return
# 多单得对锁格
locked_long_grids = self.gt.get_opened_grids_within_types(direction=Direction.LONG, types=[LOCK_GRID])
if len(locked_long_grids) == 0:
return
locked_long_dict = {}
for g in locked_long_grids:
vt_symbol = g.snapshot.get('mi_symbol', self.vt_symbol)
volume = g.volume - g.traded_volume
locked_long_dict.update({vt_symbol: locked_long_dict.get(vt_symbol, 0) + volume})
if g.order_status or g.order_ids:
self.write_log(u'当前对锁格:{}存在委托,不进行解锁'.format(g.to_json()))
return
locked_long_volume = sum(locked_long_dict.values(), 0)
# 空单对锁格
locked_short_grids = self.gt.get_opened_grids_within_types(direction=Direction.SHORT, types=[LOCK_GRID])
if len(locked_short_grids) == 0:
return
locked_short_dict = {}
for g in locked_short_grids:
vt_symbol = g.snapshot.get('mi_symbol', self.vt_symbol)
volume = g.volume - g.traded_volume
locked_short_dict.update({vt_symbol: locked_short_dict.get(vt_symbol, 0) + volume})
if g.order_status or g.order_ids:
self.write_log(u'当前对锁格:{}存在委托,不进行解锁'.format(g.to_json()))
return
locked_short_volume = sum(locked_short_dict.values(), 0)
# debug info
self.write_log(u'多单对锁格:{}'.format([g.to_json() for g in locked_long_grids]))
self.write_log(u'空单对锁格:{}'.format([g.to_json() for g in locked_short_grids]))
if locked_long_volume != locked_short_volume:
self.write_error(u'{}对锁格多空数量不一致,不能解锁.\n多:{},\n空:{}'
.format(self.strategy_name, locked_long_volume, locked_short_volume))
return
# 检查所有品种得昨仓是否满足数量
for vt_symbol, volume in locked_long_dict.items():
pos = self.cta_engine.get_position_holding(vt_symbol, None)
if pos is None:
                self.write_error(u'{} 没有获取{}得持仓信息,不能解锁'.format(self.strategy_name, vt_symbol))
return
# 检查多空单得昨单能否满足
if pos.long_yd < volume or pos.short_yd < volume:
self.write_error(u'{}持仓昨仓多单:{},空单:{},不满足解锁数量:{}'
                                 .format(vt_symbol, pos.long_yd, pos.short_yd, volume))
return
if pos.long_td > 0 or pos.short_td > 0:
self.write_log(u'{}存在今多仓:{},空仓{},不满足解锁条件'.format(vt_symbol, pos.long_td, pos.short_td))
return
price = self.cta_engine.get_price(vt_symbol)
if price is None:
                self.write_error(u'{}价格不在tick_dict缓存中,不能解锁'.format(vt_symbol))
                return
# 所有合约价格和仓位都满足同时解开
for g in locked_long_grids:
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = self.vt_symbol
dist_record['price'] = self.cur_mi_price
dist_record['volume'] = g.volume
dist_record['operation'] = 'close lock[long]'
self.save_dist(dist_record)
# 通过切换回普通网格,提升止损价的方式实现平仓
self.write_log(
u'网格 从锁仓 {}=>{},提升止损价{}=>{}进行离场'.format(LOCK_GRID, grid_type, g.stop_price,
self.cur_99_price / 2))
g.type = grid_type
g.stop_price = self.cur_99_price / 2
for g in locked_short_grids:
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = self.vt_symbol
dist_record['price'] = self.cur_mi_price
dist_record['volume'] = g.volume
dist_record['operation'] = 'close lock[short]'
self.save_dist(dist_record)
# 通过切换回普通网格,提升止损价的方式实现平仓
self.write_log(u'网格 从锁仓 {}=>{},提升止损价{}=>{}进行离场'.format(LOCK_GRID, grid_type, g.stop_price,
self.cur_99_price * 2))
g.type = grid_type
g.stop_price = self.cur_99_price * 2
def grid_check_stop(self):
"""
网格逐一止损/止盈检查 (根据指数价格进行止损止盈)
:return:
"""
if self.entrust != 0:
return
if not self.trading:
if not self.backtesting:
self.write_error(u'当前不允许交易')
return
# 多单网格逐一止损/止盈检查:
long_grids = self.gt.get_opened_grids_without_types(direction=Direction.LONG, types=[LOCK_GRID])
for g in long_grids:
# 满足离场条件,或者碰到止损价格
if g.stop_price > 0 and g.stop_price > self.cur_99_price \
and g.open_status and not g.order_status:
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = self.idx_symbol
dist_record['volume'] = g.volume
dist_record['price'] = self.cur_99_price
dist_record['operation'] = 'stop leave'
dist_record['signals'] = '{}<{}'.format(self.cur_99_price, g.stop_price)
# 止损离场
self.write_log(u'{} 指数价:{} 触发多单止损线{},{}当前价:{}。指数开仓价:{},主力开仓价:{},v:{}'.
format(self.cur_datetime, self.cur_99_price, g.stop_price, self.vt_symbol,
self.cur_mi_price,
g.open_price, g.snapshot.get('open_price'), g.volume))
self.save_dist(dist_record)
if self.tns_close_long_pos(g):
self.write_log(u'多单止盈/止损委托成功')
else:
self.write_error(u'多单止损委托失败')
# 空单网格止损检查
short_grids = self.gt.get_opened_grids_without_types(direction=Direction.SHORT, types=[LOCK_GRID])
for g in short_grids:
if g.stop_price > 0 and g.stop_price < self.cur_99_price \
and g.open_status and not g.order_status:
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = self.idx_symbol
dist_record['volume'] = g.volume
dist_record['price'] = self.cur_99_price
dist_record['operation'] = 'stop leave'
                dist_record['signals'] = '{}>{}'.format(self.cur_99_price, g.stop_price)
# 网格止损
self.write_log(u'{} 指数价:{} 触发空单止损线:{},{}最新价:{}。指数开仓价:{},主力开仓价:{},v:{}'.
format(self.cur_datetime, self.cur_99_price, g.stop_price, self.vt_symbol,
self.cur_mi_price,
g.open_price, g.snapshot.get('open_price'), g.volume))
self.save_dist(dist_record)
if self.tns_close_short_pos(g):
self.write_log(u'空单止盈/止损委托成功')
else:
self.write_error(u'委托空单平仓失败')
|
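The branching rule behind tns_close_long_pos and tns_close_short_pos above (close yesterday's position directly when it covers the requested volume and there is no today position, otherwise open an opposite lock grid) is easy to lose inside the full class. The sketch below is a standalone, simplified illustration of that rule only; PositionSnapshot and decide_close_action are names invented for this example and do not exist in the strategy code.
from dataclasses import dataclass
@dataclass
class PositionSnapshot:
    long_yd: int = 0    # yesterday's long position
    short_yd: int = 0   # yesterday's short position
    long_td: int = 0    # today's long position
    short_td: int = 0   # today's short position
def decide_close_action(pos: PositionSnapshot, volume: int, direction: str, activate_today_lock: bool = True) -> str:
    """Return 'close' when yesterday's position can be closed directly, otherwise 'lock'."""
    if not activate_today_lock:
        return 'close'
    yd = pos.long_yd if direction == 'long' else pos.short_yd
    no_today_pos = pos.long_td == 0 and pos.short_td == 0
    if yd >= volume > 0 and no_today_pos:
        return 'close'
    return 'lock'
# enough yesterday longs and no today position -> close directly
print(decide_close_action(PositionSnapshot(long_yd=3), volume=2, direction='long'))
# a today position exists -> create an opposite lock grid instead
print(decide_close_action(PositionSnapshot(long_yd=3, long_td=1), volume=2, direction='long'))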
send_message
|
Sends message in bold mode/Enviar mensagem em negrito.
:param chat_id: ID of Telegram account/ID da conta Telegram.
:param text: Message/Mensagem.
:param parse_mode: Ignore.
:param token: ID Telegram bot/ID do bot Telegram.
|
#!/usr/bin/python
import pathlib
import requests
import smtplib
import logging
import coloredlogs
import verboselogs
from etc.api.keys import *
path_atual_tl = str(pathlib.Path(__file__).parent.absolute())
path_tl_final = path_atual_tl.replace('/etc/notification','')
def logando_notification(tipo, mensagem):
"""
Generates the log message/Gera a mensagem de log.
:param tipo: Sets the log type/Seta o tipo de log.
:param mensagem: Sets the message of log/Seta a mensagem do log.
:return: Returns the complete log's body/Retorna o corpo completo do log.
"""
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG')
coloredlogs.install(level='DEBUG', logger=logger)
logging.basicConfig(format='%(asctime)s %(hostname)s %(name)s[%(process)d] %(levelname)s %(message)s')
logger = verboselogs.VerboseLogger('')
if tipo == 'verbose':
logger.verbose(mensagem)
elif tipo == 'debug':
logger.debug(mensagem)
elif tipo == 'info':
logger.info(mensagem)
elif tipo == 'warning':
logger.warning(mensagem)
elif tipo == 'error':
logger.error(mensagem)
elif tipo == 'critical':
logger.critical(mensagem)
else:
pass
def notificar_telegram(status_nosafe=False, data_nosafe=None):
"""
Generates the notification to Telegram account/Gera a notificação para a conta do Telegram.
"""
usuarios = []
with open(f'{path_tl_final}/etc/notification/users.txt', 'r') as lista:
separar = lista.readlines()
if status_nosafe:
mensagem = str(data_nosafe)
else:
with open(f'{path_tl_final}/etc/notification/message.txt', 'r') as mensagem_corpo:
mensagem = str(mensagem_corpo.read())
for i in separar:
i = i.strip('\r')
i = i.strip('\n')
i = i.split(';')
usuarios += i
    # drop empty entries without mutating the list while iterating over it
    usuarios = [u for u in usuarios if u not in ('', ' ')]
for mandar in usuarios:
token = telegram_bot
chat_id = mandar
texto = mensagem
#url_req = f'https://api.telegram.org/bot{token}/sendMessage?chat_id={chat_id}&text={texto}'
send_message(chat_id=mandar, text=mensagem, token=telegram_bot)
#results = requests.get(url_req)
# MASKED: send_message function (lines 89-103)
|
def send_message(chat_id, text=None, parse_mode = 'Markdown', token=None):
"""
Sends message in bold mode/Enviar mensagem em negrito.
    :param chat_id: ID of Telegram account/ID da conta Telegram.
:param text: Message/Mensagem.
:param parse_mode: Ignore.
:param token: ID Telegram bot/ID do bot Telegram.
"""
URL = f'https://api.telegram.org/bot{token}/sendMessage?chat_id={chat_id}&text={text}'
answer = {'chat_id': chat_id, 'text': text, 'parse_mode': 'Markdown'}
r = requests.post(URL, json=answer)
    if text == '/bold':
        # pass the token through so the nested call can reach the Telegram API
        send_message(chat_id, 'Here comes the *bold* text!', token=token)
| 89 | 103 |
#!/usr/bin/python
import pathlib
import requests
import smtplib
import logging
import coloredlogs
import verboselogs
from etc.api.keys import *
path_atual_tl = str(pathlib.Path(__file__).parent.absolute())
path_tl_final = path_atual_tl.replace('/etc/notification','')
def logando_notification(tipo, mensagem):
"""
Generates the log message/Gera a mensagem de log.
:param tipo: Sets the log type/Seta o tipo de log.
:param mensagem: Sets the message of log/Seta a mensagem do log.
:return: Returns the complete log's body/Retorna o corpo completo do log.
"""
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG')
coloredlogs.install(level='DEBUG', logger=logger)
logging.basicConfig(format='%(asctime)s %(hostname)s %(name)s[%(process)d] %(levelname)s %(message)s')
logger = verboselogs.VerboseLogger('')
if tipo == 'verbose':
logger.verbose(mensagem)
elif tipo == 'debug':
logger.debug(mensagem)
elif tipo == 'info':
logger.info(mensagem)
elif tipo == 'warning':
logger.warning(mensagem)
elif tipo == 'error':
logger.error(mensagem)
elif tipo == 'critical':
logger.critical(mensagem)
else:
pass
def notificar_telegram(status_nosafe=False, data_nosafe=None):
"""
Generates the notification to Telegram account/Gera a notificação para a conta do Telegram.
"""
usuarios = []
with open(f'{path_tl_final}/etc/notification/users.txt', 'r') as lista:
separar = lista.readlines()
if status_nosafe:
mensagem = str(data_nosafe)
else:
with open(f'{path_tl_final}/etc/notification/message.txt', 'r') as mensagem_corpo:
mensagem = str(mensagem_corpo.read())
for i in separar:
i = i.strip('\r')
i = i.strip('\n')
i = i.split(';')
usuarios += i
    # drop empty entries without mutating the list while iterating over it
    usuarios = [u for u in usuarios if u not in ('', ' ')]
for mandar in usuarios:
token = telegram_bot
chat_id = mandar
texto = mensagem
#url_req = f'https://api.telegram.org/bot{token}/sendMessage?chat_id={chat_id}&text={texto}'
send_message(chat_id=mandar, text=mensagem, token=telegram_bot)
#results = requests.get(url_req)
def send_message(chat_id, text=None, parse_mode = 'Markdown', token=None):
"""
Sends message in bold mode/Enviar mensagem em negrito.
    :param chat_id: ID of Telegram account/ID da conta Telegram.
:param text: Message/Mensagem.
:param parse_mode: Ignore.
:param token: ID Telegram bot/ID do bot Telegram.
"""
URL = f'https://api.telegram.org/bot{token}/sendMessage?chat_id={chat_id}&text={text}'
answer = {'chat_id': chat_id, 'text': text, 'parse_mode': 'Markdown'}
r = requests.post(URL, json=answer)
    if text == '/bold':
        # pass the token through so the nested call can reach the Telegram API
        send_message(chat_id, 'Here comes the *bold* text!', token=token)
|
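A minimal usage sketch of the send_message helper defined above; the import path, bot token and chat id are placeholders invented for this example (the original code reads them from etc.api.keys and users.txt).
from etc.notification.telegram import send_message  # assumed module path, adjust to the real layout
send_message(chat_id='987654321',                    # placeholder chat id
             text='backup finished without errors',  # message body
             token='123456:ABC-DEF')                 # placeholder bot token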
_cnn_net
|
Create the CNN net topology.
:return keras.Sequential(): CNN topology.
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
"""
@ide: PyCharm
@author: Pedro Silva
@contact: [email protected]
@created: out-10 of 2019
"""
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as kback
from tensorflow import keras
class QRSNet(object):
# MASKED: _cnn_net function (lines 21-70)
@classmethod
def build(cls, net_type):
"""
Build the CNN topology.
:param str net_type: the network type, CNN or LSTM.
:return keras.Sequential(): CNN topology.
"""
if net_type == 'cnn':
qrs_detector = cls._cnn_net()
else:
raise NotImplementedError('Only the CNN network was implemented.')
return qrs_detector
@classmethod
def _prepare_data(cls, data_x, input_shape, data_y, number_of_classes, normalize):
"""
Prepare the data for the training, turning it into a numpy array.
:param list data_x: data that will be used to train.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param list data_y: the labels related to the data used to train.
:param int number_of_classes: number of classes of the problem.
:param bool normalize: if the data should be normalized (True) or not (False).
:return np.array: the data processed.
"""
if len(input_shape) == 2:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1]) # Reshape for CNN - should work!!
elif len(input_shape) == 3:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1], input_shape[2]) # Reshape for CNN - should work!!
else:
raise Exception('Only inputs of two and three dimensions were implemented.')
if normalize:
data_x = data_x / np.amax(data_x)
data_y = keras.utils.to_categorical(data_y).reshape(-1, number_of_classes)
return data_x, data_y
@classmethod
def train(cls, model, train_x, train_y, validation_x, validation_y, number_of_classes, input_shape=(300, 1),
epochs=10, lr=1e-4, batch_size=4, optimizer=None, loss=None, metrics=None, normalize=False, show_net_info=True):
"""
Function used to train the model.
:param keras.Sequential model: model to be trained.
:param list train_x: data that will be used to train.
:param list train_y: the labels related to the data used to train.
:param list validation_x: data that will be used to validate the model trained.
:param list validation_y: the labels related to the data used to validate the model trained.
:param int number_of_classes: number of classes of the problem.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param int epochs: total epochs that the model will be trained.
:param float lr: learning rate used to train.
:param int batch_size: batch size used to train.
:param optimizer: which optimizer will be used to train.
:param str loss: loss function used during the training.
:param list metrics: metrics used to evaluate the trained model.
:param bool normalize: if the data should be normalized (True) or not (False).
:param bool show_net_info: if the network topology should be showed (True) or not (False).
:return keras.Sequential, dict: model trained and the history of the training process.
"""
if optimizer is None:
optimizer = keras.optimizers.SGD(lr=lr, momentum=0.9, decay=1e-4/epochs)
if loss is None:
loss = keras.losses.categorical_crossentropy
if metrics is None:
metrics = ['acc']
elif type(metrics) is not list:
metrics = [metrics]
# Set optimizer
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
if show_net_info:
print(model.summary())
# Prepare data
train_x, train_y = cls._prepare_data(train_x, input_shape, train_y, number_of_classes, normalize)
validation_x, validation_y = cls._prepare_data(validation_x, input_shape, validation_y, number_of_classes, normalize)
kback.set_value(model.optimizer.lr, lr)
train_history = model.fit(x=train_x, y=train_y, validation_data=(validation_x, validation_y), batch_size=batch_size, epochs=epochs)
# H = model.fit(x=train_x, y=train_y, batch_size=batch_size, epochs=epochs)
return model, train_history
@classmethod
def save_model(cls, model, model_name):
try:
model.save(model_name)
except OSError:
# serialize model to JSON
model_json = model.to_json()
with open(model_name.replace('.h5', '.json'), 'w') as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(model_name)
@classmethod
def load_model(cls, model_name):
if os.path.exists(model_name.replace('.h5', '.json')):
# load json and create model
json_file = open(model_name.replace('.h5', '.json'), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = keras.models.model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(model_name)
return loaded_model
else:
return keras.models.load_model(model_name)
|
@classmethod
def _cnn_net(cls):
"""
Create the CNN net topology.
:return keras.Sequential(): CNN topology.
"""
qrs_detector = keras.Sequential()
# CONV1
qrs_detector.add(keras.layers.Conv1D(96, 49, activation=tf.nn.relu, input_shape=(300, 1), strides=1, name='conv1'))
# POOLING 1
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool1'))
# CONV2
qrs_detector.add(keras.layers.Conv1D(128, 25, activation=tf.nn.relu, strides=1, name='conv2'))
# POOLING 2
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool2'))
# CONV3
qrs_detector.add(keras.layers.Conv1D(256, 9, activation=tf.nn.relu, strides=1, name='conv3'))
# POOLING 3
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool3'))
# CONV4
qrs_detector.add(keras.layers.Conv1D(512, 9, activation=tf.nn.relu, strides=1, name='conv4'))
# POOLING 4
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool4'))
qrs_detector.add(keras.layers.Flatten(data_format=None, name='flatten'))
# FC1
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc1'))
# FC2
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc2'))
# DROP1
qrs_detector.add(keras.layers.Dropout(rate=0.5, name='drop1'))
# Classes
qrs_detector.add(keras.layers.Dense(units=2, name='classes'))
# SoftMax
qrs_detector.add(keras.layers.Activation(activation=tf.nn.softmax, name='softmax'))
return qrs_detector
| 21 | 70 |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
"""
@ide: PyCharm
@author: Pedro Silva
@contact: [email protected]
@created: out-10 of 2019
"""
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as kback
from tensorflow import keras
class QRSNet(object):
@classmethod
def _cnn_net(cls):
"""
Create the CNN net topology.
:return keras.Sequential(): CNN topology.
"""
qrs_detector = keras.Sequential()
# CONV1
qrs_detector.add(keras.layers.Conv1D(96, 49, activation=tf.nn.relu, input_shape=(300, 1), strides=1, name='conv1'))
# POOLING 1
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool1'))
# CONV2
qrs_detector.add(keras.layers.Conv1D(128, 25, activation=tf.nn.relu, strides=1, name='conv2'))
# POOLING 2
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool2'))
# CONV3
qrs_detector.add(keras.layers.Conv1D(256, 9, activation=tf.nn.relu, strides=1, name='conv3'))
# POOLING 3
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool3'))
# CONV4
qrs_detector.add(keras.layers.Conv1D(512, 9, activation=tf.nn.relu, strides=1, name='conv4'))
# POOLING 4
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool4'))
qrs_detector.add(keras.layers.Flatten(data_format=None, name='flatten'))
# FC1
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc1'))
# FC2
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc2'))
# DROP1
qrs_detector.add(keras.layers.Dropout(rate=0.5, name='drop1'))
# Classes
qrs_detector.add(keras.layers.Dense(units=2, name='classes'))
# SoftMax
qrs_detector.add(keras.layers.Activation(activation=tf.nn.softmax, name='softmax'))
return qrs_detector
@classmethod
def build(cls, net_type):
"""
Build the CNN topology.
:param str net_type: the network type, CNN or LSTM.
:return keras.Sequential(): CNN topology.
"""
if net_type == 'cnn':
qrs_detector = cls._cnn_net()
else:
raise NotImplementedError('Only the CNN network was implemented.')
return qrs_detector
@classmethod
def _prepare_data(cls, data_x, input_shape, data_y, number_of_classes, normalize):
"""
Prepare the data for the training, turning it into a numpy array.
:param list data_x: data that will be used to train.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param list data_y: the labels related to the data used to train.
:param int number_of_classes: number of classes of the problem.
:param bool normalize: if the data should be normalized (True) or not (False).
:return np.array: the data processed.
"""
if len(input_shape) == 2:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1]) # Reshape for CNN - should work!!
elif len(input_shape) == 3:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1], input_shape[2]) # Reshape for CNN - should work!!
else:
raise Exception('Only inputs of two and three dimensions were implemented.')
if normalize:
data_x = data_x / np.amax(data_x)
data_y = keras.utils.to_categorical(data_y).reshape(-1, number_of_classes)
return data_x, data_y
@classmethod
def train(cls, model, train_x, train_y, validation_x, validation_y, number_of_classes, input_shape=(300, 1),
epochs=10, lr=1e-4, batch_size=4, optimizer=None, loss=None, metrics=None, normalize=False, show_net_info=True):
"""
Function used to train the model.
:param keras.Sequential model: model to be trained.
:param list train_x: data that will be used to train.
:param list train_y: the labels related to the data used to train.
:param list validation_x: data that will be used to validate the model trained.
:param list validation_y: the labels related to the data used to validate the model trained.
:param int number_of_classes: number of classes of the problem.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param int epochs: total epochs that the model will be trained.
:param float lr: learning rate used to train.
:param int batch_size: batch size used to train.
:param optimizer: which optimizer will be used to train.
:param str loss: loss function used during the training.
:param list metrics: metrics used to evaluate the trained model.
:param bool normalize: if the data should be normalized (True) or not (False).
:param bool show_net_info: if the network topology should be showed (True) or not (False).
:return keras.Sequential, dict: model trained and the history of the training process.
"""
if optimizer is None:
optimizer = keras.optimizers.SGD(lr=lr, momentum=0.9, decay=1e-4/epochs)
if loss is None:
loss = keras.losses.categorical_crossentropy
if metrics is None:
metrics = ['acc']
elif type(metrics) is not list:
metrics = [metrics]
# Set optimizer
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
if show_net_info:
print(model.summary())
# Prepare data
train_x, train_y = cls._prepare_data(train_x, input_shape, train_y, number_of_classes, normalize)
validation_x, validation_y = cls._prepare_data(validation_x, input_shape, validation_y, number_of_classes, normalize)
kback.set_value(model.optimizer.lr, lr)
train_history = model.fit(x=train_x, y=train_y, validation_data=(validation_x, validation_y), batch_size=batch_size, epochs=epochs)
# H = model.fit(x=train_x, y=train_y, batch_size=batch_size, epochs=epochs)
return model, train_history
@classmethod
def save_model(cls, model, model_name):
try:
model.save(model_name)
except OSError:
# serialize model to JSON
model_json = model.to_json()
with open(model_name.replace('.h5', '.json'), 'w') as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(model_name)
@classmethod
def load_model(cls, model_name):
if os.path.exists(model_name.replace('.h5', '.json')):
# load json and create model
json_file = open(model_name.replace('.h5', '.json'), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = keras.models.model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(model_name)
return loaded_model
else:
return keras.models.load_model(model_name)
|
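For orientation, a minimal end-to-end sketch of how the QRSNet class above might be exercised on random data; the 300-sample single-channel windows and two classes mirror the topology above, while the sample counts, single epoch and file name are illustrative assumptions.
import numpy as np
n_train, n_val = 32, 8
train_x = [np.random.rand(300, 1) for _ in range(n_train)]  # 300-sample, single-channel windows
train_y = [i % 2 for i in range(n_train)]                   # alternate the two classes (QRS / not QRS)
val_x = [np.random.rand(300, 1) for _ in range(n_val)]
val_y = [i % 2 for i in range(n_val)]
model = QRSNet.build('cnn')                                 # assumes the QRSNet class above is importable
model, history = QRSNet.train(model, train_x, train_y, val_x, val_y,
                              number_of_classes=2, input_shape=(300, 1),
                              epochs=1, batch_size=4, show_net_info=False)
QRSNet.save_model(model, 'qrs_detector.h5')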
build
|
Build the CNN topology.
:param str net_type: the network type, CNN or LSTM.
:return keras.Sequential(): CNN topology.
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
"""
@ide: PyCharm
@author: Pedro Silva
@contact: [email protected]
@created: out-10 of 2019
"""
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as kback
from tensorflow import keras
class QRSNet(object):
@classmethod
def _cnn_net(cls):
"""
Create the CNN net topology.
:return keras.Sequential(): CNN topology.
"""
qrs_detector = keras.Sequential()
# CONV1
qrs_detector.add(keras.layers.Conv1D(96, 49, activation=tf.nn.relu, input_shape=(300, 1), strides=1, name='conv1'))
# POOLING 1
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool1'))
# CONV2
qrs_detector.add(keras.layers.Conv1D(128, 25, activation=tf.nn.relu, strides=1, name='conv2'))
# POOLING 2
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool2'))
# CONV3
qrs_detector.add(keras.layers.Conv1D(256, 9, activation=tf.nn.relu, strides=1, name='conv3'))
# POOLING 3
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool3'))
# CONV4
qrs_detector.add(keras.layers.Conv1D(512, 9, activation=tf.nn.relu, strides=1, name='conv4'))
# POOLING 4
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool4'))
qrs_detector.add(keras.layers.Flatten(data_format=None, name='flatten'))
# FC1
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc1'))
# FC2
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc2'))
# DROP1
qrs_detector.add(keras.layers.Dropout(rate=0.5, name='drop1'))
# Classes
qrs_detector.add(keras.layers.Dense(units=2, name='classes'))
# SoftMax
qrs_detector.add(keras.layers.Activation(activation=tf.nn.softmax, name='softmax'))
return qrs_detector
# MASKED: build function (lines 72-84)
@classmethod
def _prepare_data(cls, data_x, input_shape, data_y, number_of_classes, normalize):
"""
Prepare the data for the training, turning it into a numpy array.
:param list data_x: data that will be used to train.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param list data_y: the labels related to the data used to train.
:param int number_of_classes: number of classes of the problem.
:param bool normalize: if the data should be normalized (True) or not (False).
:return np.array: the data processed.
"""
if len(input_shape) == 2:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1]) # Reshape for CNN - should work!!
elif len(input_shape) == 3:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1], input_shape[2]) # Reshape for CNN - should work!!
else:
raise Exception('Only inputs of two and three dimensions were implemented.')
if normalize:
data_x = data_x / np.amax(data_x)
data_y = keras.utils.to_categorical(data_y).reshape(-1, number_of_classes)
return data_x, data_y
@classmethod
def train(cls, model, train_x, train_y, validation_x, validation_y, number_of_classes, input_shape=(300, 1),
epochs=10, lr=1e-4, batch_size=4, optimizer=None, loss=None, metrics=None, normalize=False, show_net_info=True):
"""
Function used to train the model.
:param keras.Sequential model: model to be trained.
:param list train_x: data that will be used to train.
:param list train_y: the labels related to the data used to train.
:param list validation_x: data that will be used to validate the model trained.
:param list validation_y: the labels related to the data used to validate the model trained.
:param int number_of_classes: number of classes of the problem.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param int epochs: total epochs that the model will be trained.
:param float lr: learning rate used to train.
:param int batch_size: batch size used to train.
:param optimizer: which optimizer will be used to train.
:param str loss: loss function used during the training.
:param list metrics: metrics used to evaluate the trained model.
:param bool normalize: if the data should be normalized (True) or not (False).
:param bool show_net_info: if the network topology should be showed (True) or not (False).
:return keras.Sequential, dict: model trained and the history of the training process.
"""
if optimizer is None:
optimizer = keras.optimizers.SGD(lr=lr, momentum=0.9, decay=1e-4/epochs)
if loss is None:
loss = keras.losses.categorical_crossentropy
if metrics is None:
metrics = ['acc']
elif type(metrics) is not list:
metrics = [metrics]
# Set optimizer
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
if show_net_info:
print(model.summary())
# Prepare data
train_x, train_y = cls._prepare_data(train_x, input_shape, train_y, number_of_classes, normalize)
validation_x, validation_y = cls._prepare_data(validation_x, input_shape, validation_y, number_of_classes, normalize)
kback.set_value(model.optimizer.lr, lr)
train_history = model.fit(x=train_x, y=train_y, validation_data=(validation_x, validation_y), batch_size=batch_size, epochs=epochs)
# H = model.fit(x=train_x, y=train_y, batch_size=batch_size, epochs=epochs)
return model, train_history
@classmethod
def save_model(cls, model, model_name):
try:
model.save(model_name)
except OSError:
# serialize model to JSON
model_json = model.to_json()
with open(model_name.replace('.h5', '.json'), 'w') as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(model_name)
@classmethod
def load_model(cls, model_name):
if os.path.exists(model_name.replace('.h5', '.json')):
# load json and create model
json_file = open(model_name.replace('.h5', '.json'), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = keras.models.model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(model_name)
return loaded_model
else:
return keras.models.load_model(model_name)
|
@classmethod
def build(cls, net_type):
"""
Build the CNN topology.
:param str net_type: the network type, CNN or LSTM.
:return keras.Sequential(): CNN topology.
"""
if net_type == 'cnn':
qrs_detector = cls._cnn_net()
else:
raise NotImplementedError('Only the CNN network was implemented.')
return qrs_detector
| 72 | 84 |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
"""
@ide: PyCharm
@author: Pedro Silva
@contact: [email protected]
@created: out-10 of 2019
"""
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as kback
from tensorflow import keras
class QRSNet(object):
@classmethod
def _cnn_net(cls):
"""
Create the CNN net topology.
:return keras.Sequential(): CNN topology.
"""
qrs_detector = keras.Sequential()
# CONV1
qrs_detector.add(keras.layers.Conv1D(96, 49, activation=tf.nn.relu, input_shape=(300, 1), strides=1, name='conv1'))
# POOLING 1
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool1'))
# CONV2
qrs_detector.add(keras.layers.Conv1D(128, 25, activation=tf.nn.relu, strides=1, name='conv2'))
# POOLING 2
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool2'))
# CONV3
qrs_detector.add(keras.layers.Conv1D(256, 9, activation=tf.nn.relu, strides=1, name='conv3'))
# POOLING 3
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool3'))
# CONV4
qrs_detector.add(keras.layers.Conv1D(512, 9, activation=tf.nn.relu, strides=1, name='conv4'))
# POOLING 4
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool4'))
qrs_detector.add(keras.layers.Flatten(data_format=None, name='flatten'))
# FC1
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc1'))
# FC2
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc2'))
# DROP1
qrs_detector.add(keras.layers.Dropout(rate=0.5, name='drop1'))
# Classes
qrs_detector.add(keras.layers.Dense(units=2, name='classes'))
# SoftMax
qrs_detector.add(keras.layers.Activation(activation=tf.nn.softmax, name='softmax'))
return qrs_detector
@classmethod
def build(cls, net_type):
"""
Build the CNN topology.
:param str net_type: the network type, CNN or LSTM.
:return keras.Sequential(): CNN topology.
"""
if net_type == 'cnn':
qrs_detector = cls._cnn_net()
else:
raise NotImplementedError('Only the CNN network was implemented.')
return qrs_detector
@classmethod
def _prepare_data(cls, data_x, input_shape, data_y, number_of_classes, normalize):
"""
Prepare the data for the training, turning it into a numpy array.
:param list data_x: data that will be used to train.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param list data_y: the labels related to the data used to train.
:param int number_of_classes: number of classes of the problem.
:param bool normalize: if the data should be normalized (True) or not (False).
:return np.array: the data processed.
"""
if len(input_shape) == 2:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1]) # Reshape for CNN - should work!!
elif len(input_shape) == 3:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1], input_shape[2]) # Reshape for CNN - should work!!
else:
raise Exception('Only inputs of two and three dimensions were implemented.')
if normalize:
data_x = data_x / np.amax(data_x)
data_y = keras.utils.to_categorical(data_y).reshape(-1, number_of_classes)
return data_x, data_y
@classmethod
def train(cls, model, train_x, train_y, validation_x, validation_y, number_of_classes, input_shape=(300, 1),
epochs=10, lr=1e-4, batch_size=4, optimizer=None, loss=None, metrics=None, normalize=False, show_net_info=True):
"""
Function used to train the model.
:param keras.Sequential model: model to be trained.
:param list train_x: data that will be used to train.
:param list train_y: the labels related to the data used to train.
:param list validation_x: data that will be used to validate the model trained.
:param list validation_y: the labels related to the data used to validate the model trained.
:param int number_of_classes: number of classes of the problem.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param int epochs: total epochs that the model will be trained.
:param float lr: learning rate used to train.
:param int batch_size: batch size used to train.
:param optimizer: which optimizer will be used to train.
:param str loss: loss function used during the training.
:param list metrics: metrics used to evaluate the trained model.
:param bool normalize: if the data should be normalized (True) or not (False).
:param bool show_net_info: if the network topology should be showed (True) or not (False).
:return keras.Sequential, dict: model trained and the history of the training process.
"""
if optimizer is None:
optimizer = keras.optimizers.SGD(lr=lr, momentum=0.9, decay=1e-4/epochs)
if loss is None:
loss = keras.losses.categorical_crossentropy
if metrics is None:
metrics = ['acc']
elif type(metrics) is not list:
metrics = [metrics]
# Set optimizer
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
if show_net_info:
print(model.summary())
# Prepare data
train_x, train_y = cls._prepare_data(train_x, input_shape, train_y, number_of_classes, normalize)
validation_x, validation_y = cls._prepare_data(validation_x, input_shape, validation_y, number_of_classes, normalize)
kback.set_value(model.optimizer.lr, lr)
train_history = model.fit(x=train_x, y=train_y, validation_data=(validation_x, validation_y), batch_size=batch_size, epochs=epochs)
# H = model.fit(x=train_x, y=train_y, batch_size=batch_size, epochs=epochs)
return model, train_history
@classmethod
def save_model(cls, model, model_name):
try:
model.save(model_name)
except OSError:
# serialize model to JSON
model_json = model.to_json()
with open(model_name.replace('.h5', '.json'), 'w') as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(model_name)
@classmethod
def load_model(cls, model_name):
if os.path.exists(model_name.replace('.h5', '.json')):
# load json and create model
json_file = open(model_name.replace('.h5', '.json'), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = keras.models.model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(model_name)
return loaded_model
else:
return keras.models.load_model(model_name)
|
_prepare_data
|
Prepare the data for the training, turning it into a numpy array.
:param list data_x: data that will be used to train.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param list data_y: the labels related to the data used to train.
:param int number_of_classes: number of classes of the problem.
:param bool normalize: if the data should be normalized (True) or not (False).
:return np.array: the data processed.
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
"""
@ide: PyCharm
@author: Pedro Silva
@contact: [email protected]
@created: out-10 of 2019
"""
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as kback
from tensorflow import keras
class QRSNet(object):
@classmethod
def _cnn_net(cls):
"""
Create the CNN net topology.
:return keras.Sequential(): CNN topology.
"""
qrs_detector = keras.Sequential()
# CONV1
qrs_detector.add(keras.layers.Conv1D(96, 49, activation=tf.nn.relu, input_shape=(300, 1), strides=1, name='conv1'))
# POOLING 1
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool1'))
# CONV2
qrs_detector.add(keras.layers.Conv1D(128, 25, activation=tf.nn.relu, strides=1, name='conv2'))
# POOLING 2
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool2'))
# CONV3
qrs_detector.add(keras.layers.Conv1D(256, 9, activation=tf.nn.relu, strides=1, name='conv3'))
# POOLING 3
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool3'))
# CONV4
qrs_detector.add(keras.layers.Conv1D(512, 9, activation=tf.nn.relu, strides=1, name='conv4'))
# POOLING 4
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool4'))
qrs_detector.add(keras.layers.Flatten(data_format=None, name='flatten'))
# FC1
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc1'))
# FC2
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc2'))
# DROP1
qrs_detector.add(keras.layers.Dropout(rate=0.5, name='drop1'))
# Classes
qrs_detector.add(keras.layers.Dense(units=2, name='classes'))
# SoftMax
qrs_detector.add(keras.layers.Activation(activation=tf.nn.softmax, name='softmax'))
return qrs_detector
@classmethod
def build(cls, net_type):
"""
Build the CNN topology.
:param str net_type: the network type, CNN or LSTM.
:return keras.Sequential(): CNN topology.
"""
if net_type == 'cnn':
qrs_detector = cls._cnn_net()
else:
raise NotImplementedError('Only the CNN network was implemented.')
return qrs_detector
# MASKED: _prepare_data function (lines 86-107)
@classmethod
def train(cls, model, train_x, train_y, validation_x, validation_y, number_of_classes, input_shape=(300, 1),
epochs=10, lr=1e-4, batch_size=4, optimizer=None, loss=None, metrics=None, normalize=False, show_net_info=True):
"""
Function used to train the model.
:param keras.Sequential model: model to be trained.
:param list train_x: data that will be used to train.
:param list train_y: the labels related to the data used to train.
:param list validation_x: data that will be used to validate the model trained.
:param list validation_y: the labels related to the data used to validate the model trained.
:param int number_of_classes: number of classes of the problem.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param int epochs: total epochs that the model will be trained.
:param float lr: learning rate used to train.
:param int batch_size: batch size used to train.
:param optimizer: which optimizer will be used to train.
:param str loss: loss function used during the training.
:param list metrics: metrics used to evaluate the trained model.
:param bool normalize: if the data should be normalized (True) or not (False).
        :param bool show_net_info: if the network topology should be shown (True) or not (False).
:return keras.Sequential, dict: model trained and the history of the training process.
"""
if optimizer is None:
optimizer = keras.optimizers.SGD(lr=lr, momentum=0.9, decay=1e-4/epochs)
if loss is None:
loss = keras.losses.categorical_crossentropy
if metrics is None:
metrics = ['acc']
elif type(metrics) is not list:
metrics = [metrics]
# Set optimizer
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
if show_net_info:
print(model.summary())
# Prepare data
train_x, train_y = cls._prepare_data(train_x, input_shape, train_y, number_of_classes, normalize)
validation_x, validation_y = cls._prepare_data(validation_x, input_shape, validation_y, number_of_classes, normalize)
kback.set_value(model.optimizer.lr, lr)
train_history = model.fit(x=train_x, y=train_y, validation_data=(validation_x, validation_y), batch_size=batch_size, epochs=epochs)
# H = model.fit(x=train_x, y=train_y, batch_size=batch_size, epochs=epochs)
return model, train_history
@classmethod
def save_model(cls, model, model_name):
try:
model.save(model_name)
except OSError:
# serialize model to JSON
model_json = model.to_json()
with open(model_name.replace('.h5', '.json'), 'w') as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(model_name)
@classmethod
def load_model(cls, model_name):
if os.path.exists(model_name.replace('.h5', '.json')):
# load json and create model
json_file = open(model_name.replace('.h5', '.json'), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = keras.models.model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(model_name)
return loaded_model
else:
return keras.models.load_model(model_name)
|
@classmethod
def _prepare_data(cls, data_x, input_shape, data_y, number_of_classes, normalize):
"""
Prepare the data for the training, turning it into a numpy array.
:param list data_x: data that will be used to train.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param list data_y: the labels related to the data used to train.
:param int number_of_classes: number of classes of the problem.
:param bool normalize: if the data should be normalized (True) or not (False).
:return np.array: the data processed.
"""
if len(input_shape) == 2:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1]) # Reshape for CNN - should work!!
elif len(input_shape) == 3:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1], input_shape[2]) # Reshape for CNN - should work!!
else:
raise Exception('Only inputs of two and three dimensions were implemented.')
if normalize:
data_x = data_x / np.amax(data_x)
data_y = keras.utils.to_categorical(data_y).reshape(-1, number_of_classes)
return data_x, data_y
| 86 | 107 |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
"""
@ide: PyCharm
@author: Pedro Silva
@contact: [email protected]
@created: out-10 of 2019
"""
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as kback
from tensorflow import keras
class QRSNet(object):
@classmethod
def _cnn_net(cls):
"""
Create the CNN net topology.
:return keras.Sequential(): CNN topology.
"""
qrs_detector = keras.Sequential()
# CONV1
qrs_detector.add(keras.layers.Conv1D(96, 49, activation=tf.nn.relu, input_shape=(300, 1), strides=1, name='conv1'))
# POOLING 1
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool1'))
# CONV2
qrs_detector.add(keras.layers.Conv1D(128, 25, activation=tf.nn.relu, strides=1, name='conv2'))
# POOLING 2
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool2'))
# CONV3
qrs_detector.add(keras.layers.Conv1D(256, 9, activation=tf.nn.relu, strides=1, name='conv3'))
# POOLING 3
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool3'))
# CONV4
qrs_detector.add(keras.layers.Conv1D(512, 9, activation=tf.nn.relu, strides=1, name='conv4'))
# POOLING 4
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool4'))
qrs_detector.add(keras.layers.Flatten(data_format=None, name='flatten'))
# FC1
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc1'))
# FC2
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc2'))
# DROP1
qrs_detector.add(keras.layers.Dropout(rate=0.5, name='drop1'))
# Classes
qrs_detector.add(keras.layers.Dense(units=2, name='classes'))
# SoftMax
qrs_detector.add(keras.layers.Activation(activation=tf.nn.softmax, name='softmax'))
return qrs_detector
@classmethod
def build(cls, net_type):
"""
Build the CNN topology.
:param str net_type: the network type, CNN or LSTM.
:return keras.Sequential(): CNN topology.
"""
if net_type == 'cnn':
qrs_detector = cls._cnn_net()
else:
raise NotImplementedError('Only the CNN network was implemented.')
return qrs_detector
@classmethod
def _prepare_data(cls, data_x, input_shape, data_y, number_of_classes, normalize):
"""
Prepare the data for the training, turning it into a numpy array.
:param list data_x: data that will be used to train.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param list data_y: the labels related to the data used to train.
:param int number_of_classes: number of classes of the problem.
:param bool normalize: if the data should be normalized (True) or not (False).
:return np.array: the data processed.
"""
if len(input_shape) == 2:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1]) # Reshape for CNN - should work!!
elif len(input_shape) == 3:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1], input_shape[2]) # Reshape for CNN - should work!!
else:
raise Exception('Only inputs of two and three dimensions were implemented.')
if normalize:
data_x = data_x / np.amax(data_x)
data_y = keras.utils.to_categorical(data_y).reshape(-1, number_of_classes)
return data_x, data_y
@classmethod
def train(cls, model, train_x, train_y, validation_x, validation_y, number_of_classes, input_shape=(300, 1),
epochs=10, lr=1e-4, batch_size=4, optimizer=None, loss=None, metrics=None, normalize=False, show_net_info=True):
"""
Function used to train the model.
:param keras.Sequential model: model to be trained.
:param list train_x: data that will be used to train.
:param list train_y: the labels related to the data used to train.
:param list validation_x: data that will be used to validate the model trained.
:param list validation_y: the labels related to the data used to validate the model trained.
:param int number_of_classes: number of classes of the problem.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param int epochs: total epochs that the model will be trained.
:param float lr: learning rate used to train.
:param int batch_size: batch size used to train.
:param optimizer: which optimizer will be used to train.
:param str loss: loss function used during the training.
:param list metrics: metrics used to evaluate the trained model.
:param bool normalize: if the data should be normalized (True) or not (False).
        :param bool show_net_info: if the network topology should be shown (True) or not (False).
:return keras.Sequential, dict: model trained and the history of the training process.
"""
if optimizer is None:
optimizer = keras.optimizers.SGD(lr=lr, momentum=0.9, decay=1e-4/epochs)
if loss is None:
loss = keras.losses.categorical_crossentropy
if metrics is None:
metrics = ['acc']
elif type(metrics) is not list:
metrics = [metrics]
# Set optimizer
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
if show_net_info:
print(model.summary())
# Prepare data
train_x, train_y = cls._prepare_data(train_x, input_shape, train_y, number_of_classes, normalize)
validation_x, validation_y = cls._prepare_data(validation_x, input_shape, validation_y, number_of_classes, normalize)
kback.set_value(model.optimizer.lr, lr)
train_history = model.fit(x=train_x, y=train_y, validation_data=(validation_x, validation_y), batch_size=batch_size, epochs=epochs)
# H = model.fit(x=train_x, y=train_y, batch_size=batch_size, epochs=epochs)
return model, train_history
@classmethod
def save_model(cls, model, model_name):
try:
model.save(model_name)
except OSError:
# serialize model to JSON
model_json = model.to_json()
with open(model_name.replace('.h5', '.json'), 'w') as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(model_name)
@classmethod
def load_model(cls, model_name):
if os.path.exists(model_name.replace('.h5', '.json')):
# load json and create model
json_file = open(model_name.replace('.h5', '.json'), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = keras.models.model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(model_name)
return loaded_model
else:
return keras.models.load_model(model_name)
|
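A hedged usage sketch for the QRSNet class defined above: it builds the CNN, trains it briefly on synthetic 300-sample windows with binary labels, and saves the result. The module name `qrs_net` and the random data are assumptions for illustration only, and it presumes a TensorFlow version that still accepts the `SGD(lr=..., decay=...)` arguments used inside `train`.
import numpy as np
from qrs_net import QRSNet  # assumed module name for the file above
# Synthetic stand-in data: 32 training and 8 validation windows of 300 samples, binary labels.
train_x = [np.random.rand(300) for _ in range(32)]
train_y = [i % 2 for i in range(32)]
val_x = [np.random.rand(300) for _ in range(8)]
val_y = [i % 2 for i in range(8)]
model = QRSNet.build('cnn')
model, history = QRSNet.train(
    model, train_x, train_y, val_x, val_y,
    number_of_classes=2, input_shape=(300, 1),
    epochs=1, batch_size=4, normalize=True, show_net_info=False,
)
QRSNet.save_model(model, 'qrs_cnn.h5')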
_convert_dataset_to_image_and_bboxes
|
@param dataset: [image_path, [[x, y, w, h, class_id], ...]]
@return image, bboxes
image: 0.0 ~ 1.0, Dim(1, height, width, channels)
|
"""
MIT License
Copyright (c) 2019 YangYun
Copyright (c) 2020 Việt Hùng
Copyright (c) 2020-2021 Hyeonki Hong <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import List
import cv2
import numpy as np
from tensorflow.keras.utils import Sequence
from .augmentation import mosaic
from ...common import (
media,
convert_dataset_to_ground_truth as _convert_dataset_to_ground_truth,
)
from ...common.config import YOLOConfig
from ...common.parser import parse_dataset
_AUGMETATION_CACHE_SIZE = 50
class YOLODataset(Sequence):
def __init__(
self,
config: YOLOConfig,
dataset_list: str,
dataset_type: str = "converted_coco",
image_path_prefix: str = "",
training: bool = True,
):
self.dataset = parse_dataset(
dataset_list=dataset_list,
dataset_type=dataset_type,
image_path_prefix=image_path_prefix,
)
self._metayolos = []
if config.layer_count["yolo"] > 0:
for i in range(config.layer_count["yolo"]):
self._metayolos.append(config.find_metalayer("yolo", i))
elif config.layer_count["yolo_tpu"] > 0:
for i in range(config.layer_count["yolo_tpu"]):
self._metayolos.append(config.find_metalayer("yolo_tpu", i))
else:
raise RuntimeError(
"YOLODataset: model does not have a yolo or yolo_tpu layer"
)
self._metanet = config.net
self._metayolos_np = np.zeros(
(len(self._metayolos), 7 + len(self._metayolos[-1].mask)),
dtype=np.float32,
)
for i, metayolo in enumerate(self._metayolos):
self._metayolos_np[i, 0] = metayolo.height
self._metayolos_np[i, 1] = metayolo.width
self._metayolos_np[i, 2] = metayolo.channels
self._metayolos_np[i, 3] = metayolo.classes
self._metayolos_np[i, 4] = metayolo.label_smooth_eps
self._metayolos_np[i, 5] = metayolo.max
self._metayolos_np[i, 6] = metayolo.iou_thresh
for j, mask in enumerate(metayolo.mask):
self._metayolos_np[i, 7 + j] = mask
self._anchors_np = np.zeros(
len(self._metayolos[-1].anchors) * 2, dtype=np.float32
)
for i, anchor in enumerate(self._metayolos[-1].anchors):
self._anchors_np[2 * i] = anchor[0] / self._metanet.width
self._anchors_np[2 * i + 1] = anchor[1] / self._metanet.height
# Data augmentation ####################################################
self._augmentation: List[str] = []
if config.net.mosaic:
self._augmentation.append("mosaic")
if training and len(self._augmentation) > 0:
self._augmentation_batch = int(config.net.batch * 0.3)
self._training = True
else:
self._augmentation_batch = 0
self._training = False
self._augmentation_cache = [
self._get_dataset(i) for i in range(_AUGMETATION_CACHE_SIZE)
]
self._augmentation_cache_index = 0
def _convert_dataset_to_ground_truth(self, dataset_bboxes):
"""
@param `dataset_bboxes`: [[b_x, b_y, b_w, b_h, class_id], ...]
        @return `ground_truth_one`:
[Dim(yolo.h, yolo.w, yolo.c + len(mask))] * len(yolo)
"""
return _convert_dataset_to_ground_truth(
dataset_bboxes, self._metayolos_np, self._anchors_np
)
# MASKED: _convert_dataset_to_image_and_bboxes function (lines 121-142)
def _get_dataset(self, index: int):
offset = 0
for offset in range(5):
image, bboxes = self._convert_dataset_to_image_and_bboxes(
self.dataset[(index + offset) % len(self.dataset)]
)
if image is None:
offset += 1
else:
return image, bboxes
raise FileNotFoundError("Failed to find images")
def __getitem__(self, index):
"""
@return
`images`: Dim(batch, height, width, channels)
            `ground_truth_one`:
[Dim(batch, yolo.h, yolo.w, yolo.c + len(mask))] * len(yolo)
"""
batch_x = []
# [[gt_one, gt_one, ...],
# [gt_one, gt_one, ...], ...]
batch_y = [[] for _ in range(len(self._metayolos))]
start_index = index * self._metanet.batch
for i in range(self._metanet.batch - self._augmentation_batch):
image, bboxes = self._get_dataset(start_index + i)
self._augmentation_cache[self._augmentation_cache_index] = (
image,
bboxes,
)
self._augmentation_cache_index = (
self._augmentation_cache_index + 1
) % _AUGMETATION_CACHE_SIZE
batch_x.append(image)
ground_truth = self._convert_dataset_to_ground_truth(bboxes)
for j in range(len(self._metayolos)):
batch_y[j].append(ground_truth[j])
for i in range(self._augmentation_batch):
augmentation = self._augmentation[
np.random.randint(0, len(self._augmentation))
]
image = None
bboxes = None
if augmentation == "mosaic":
image, bboxes = mosaic(
*[
self._augmentation_cache[
np.random.randint(
0,
_AUGMETATION_CACHE_SIZE,
)
]
for _ in range(4)
]
)
batch_x.append(image)
ground_truth = self._convert_dataset_to_ground_truth(bboxes)
for j in range(len(self._metayolos)):
batch_y[j].append(ground_truth[j])
return np.concatenate(batch_x, axis=0), [
np.stack(y, axis=0) for y in batch_y
]
def __len__(self):
return len(self.dataset) // (
self._metanet.batch - self._augmentation_batch
)
|
def _convert_dataset_to_image_and_bboxes(self, dataset):
"""
@param dataset: [image_path, [[x, y, w, h, class_id], ...]]
@return image, bboxes
image: 0.0 ~ 1.0, Dim(1, height, width, channels)
"""
# pylint: disable=bare-except
try:
image = cv2.imread(dataset[0])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
except:
return None, None
resized_image, resized_bboxes = media.resize_image(
image,
target_shape=self._metanet.input_shape,
ground_truth=dataset[1],
)
resized_image = np.expand_dims(resized_image / 255.0, axis=0)
return resized_image, resized_bboxes
| 121 | 142 |
"""
MIT License
Copyright (c) 2019 YangYun
Copyright (c) 2020 Việt Hùng
Copyright (c) 2020-2021 Hyeonki Hong <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import List
import cv2
import numpy as np
from tensorflow.keras.utils import Sequence
from .augmentation import mosaic
from ...common import (
media,
convert_dataset_to_ground_truth as _convert_dataset_to_ground_truth,
)
from ...common.config import YOLOConfig
from ...common.parser import parse_dataset
_AUGMETATION_CACHE_SIZE = 50
class YOLODataset(Sequence):
def __init__(
self,
config: YOLOConfig,
dataset_list: str,
dataset_type: str = "converted_coco",
image_path_prefix: str = "",
training: bool = True,
):
self.dataset = parse_dataset(
dataset_list=dataset_list,
dataset_type=dataset_type,
image_path_prefix=image_path_prefix,
)
self._metayolos = []
if config.layer_count["yolo"] > 0:
for i in range(config.layer_count["yolo"]):
self._metayolos.append(config.find_metalayer("yolo", i))
elif config.layer_count["yolo_tpu"] > 0:
for i in range(config.layer_count["yolo_tpu"]):
self._metayolos.append(config.find_metalayer("yolo_tpu", i))
else:
raise RuntimeError(
"YOLODataset: model does not have a yolo or yolo_tpu layer"
)
self._metanet = config.net
self._metayolos_np = np.zeros(
(len(self._metayolos), 7 + len(self._metayolos[-1].mask)),
dtype=np.float32,
)
for i, metayolo in enumerate(self._metayolos):
self._metayolos_np[i, 0] = metayolo.height
self._metayolos_np[i, 1] = metayolo.width
self._metayolos_np[i, 2] = metayolo.channels
self._metayolos_np[i, 3] = metayolo.classes
self._metayolos_np[i, 4] = metayolo.label_smooth_eps
self._metayolos_np[i, 5] = metayolo.max
self._metayolos_np[i, 6] = metayolo.iou_thresh
for j, mask in enumerate(metayolo.mask):
self._metayolos_np[i, 7 + j] = mask
self._anchors_np = np.zeros(
len(self._metayolos[-1].anchors) * 2, dtype=np.float32
)
for i, anchor in enumerate(self._metayolos[-1].anchors):
self._anchors_np[2 * i] = anchor[0] / self._metanet.width
self._anchors_np[2 * i + 1] = anchor[1] / self._metanet.height
# Data augmentation ####################################################
self._augmentation: List[str] = []
if config.net.mosaic:
self._augmentation.append("mosaic")
if training and len(self._augmentation) > 0:
self._augmentation_batch = int(config.net.batch * 0.3)
self._training = True
else:
self._augmentation_batch = 0
self._training = False
self._augmentation_cache = [
self._get_dataset(i) for i in range(_AUGMETATION_CACHE_SIZE)
]
self._augmentation_cache_index = 0
def _convert_dataset_to_ground_truth(self, dataset_bboxes):
"""
@param `dataset_bboxes`: [[b_x, b_y, b_w, b_h, class_id], ...]
        @return `ground_truth_one`:
[Dim(yolo.h, yolo.w, yolo.c + len(mask))] * len(yolo)
"""
return _convert_dataset_to_ground_truth(
dataset_bboxes, self._metayolos_np, self._anchors_np
)
def _convert_dataset_to_image_and_bboxes(self, dataset):
"""
@param dataset: [image_path, [[x, y, w, h, class_id], ...]]
@return image, bboxes
image: 0.0 ~ 1.0, Dim(1, height, width, channels)
"""
# pylint: disable=bare-except
try:
image = cv2.imread(dataset[0])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
except:
return None, None
resized_image, resized_bboxes = media.resize_image(
image,
target_shape=self._metanet.input_shape,
ground_truth=dataset[1],
)
resized_image = np.expand_dims(resized_image / 255.0, axis=0)
return resized_image, resized_bboxes
def _get_dataset(self, index: int):
offset = 0
for offset in range(5):
image, bboxes = self._convert_dataset_to_image_and_bboxes(
self.dataset[(index + offset) % len(self.dataset)]
)
if image is None:
offset += 1
else:
return image, bboxes
raise FileNotFoundError("Failed to find images")
def __getitem__(self, index):
"""
@return
`images`: Dim(batch, height, width, channels)
            `ground_truth_one`:
[Dim(batch, yolo.h, yolo.w, yolo.c + len(mask))] * len(yolo)
"""
batch_x = []
# [[gt_one, gt_one, ...],
# [gt_one, gt_one, ...], ...]
batch_y = [[] for _ in range(len(self._metayolos))]
start_index = index * self._metanet.batch
for i in range(self._metanet.batch - self._augmentation_batch):
image, bboxes = self._get_dataset(start_index + i)
self._augmentation_cache[self._augmentation_cache_index] = (
image,
bboxes,
)
self._augmentation_cache_index = (
self._augmentation_cache_index + 1
) % _AUGMETATION_CACHE_SIZE
batch_x.append(image)
ground_truth = self._convert_dataset_to_ground_truth(bboxes)
for j in range(len(self._metayolos)):
batch_y[j].append(ground_truth[j])
for i in range(self._augmentation_batch):
augmentation = self._augmentation[
np.random.randint(0, len(self._augmentation))
]
image = None
bboxes = None
if augmentation == "mosaic":
image, bboxes = mosaic(
*[
self._augmentation_cache[
np.random.randint(
0,
_AUGMETATION_CACHE_SIZE,
)
]
for _ in range(4)
]
)
batch_x.append(image)
ground_truth = self._convert_dataset_to_ground_truth(bboxes)
for j in range(len(self._metayolos)):
batch_y[j].append(ground_truth[j])
return np.concatenate(batch_x, axis=0), [
np.stack(y, axis=0) for y in batch_y
]
def __len__(self):
return len(self.dataset) // (
self._metanet.batch - self._augmentation_batch
)
|
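A sketch of how this Sequence might be consumed during training, assuming a YOLOConfig that has already been populated with net/yolo layers elsewhere in the package; the import path, annotation file and image directory below are placeholders, not taken from the file above.
from yolov4.tf.dataset import YOLODataset  # assumed import path
def iterate_one_epoch(config, annotation_file, image_dir):
    # `config` is assumed to be a fully parsed YOLOConfig (net + yolo layers).
    dataset = YOLODataset(
        config=config,
        dataset_list=annotation_file,      # e.g. a converted_coco annotation list
        dataset_type="converted_coco",
        image_path_prefix=image_dir,
        training=True,
    )
    for batch_index in range(len(dataset)):
        images, ground_truths = dataset[batch_index]
        # images: (batch, height, width, channels), values in [0, 1]
        # ground_truths: one array per yolo layer, each
        #   (batch, yolo.h, yolo.w, yolo.c + len(mask))
        print(batch_index, images.shape, [g.shape for g in ground_truths])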
setup_logging
|
    Set up the logging device to log into a uniquely created directory.
Args:
name: Name of the directory for the log-files.
dir: Optional sub-directory within log
|
# MIT License
# Copyright (c) 2020 Simon Schug, João Sacramento
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
import time
from importlib import reload
import torch
# Global variables
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
log_dir = None
log_name = None
writer = None
# MASKED: setup_logging function (lines 37-70)
|
def setup_logging(name, dir=""):
"""
    Set up the logging device to log into a uniquely created directory.
Args:
name: Name of the directory for the log-files.
dir: Optional sub-directory within log
"""
# Setup global log name and directory
global log_name
log_name = name
# Setup global logging directory
global log_dir
log_dir = os.path.join("log", dir)
# Create the logging folder if it does not exist already
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
# Need to reload logging as otherwise the logger might be captured by another library
reload(logging)
# Setup global logger
logging.basicConfig(
level=logging.INFO,
format="[%(levelname)-5.5s %(asctime)s] %(message)s",
datefmt='%H:%M:%S',
handlers=[
logging.FileHandler(os.path.join(
log_dir, time.strftime("%Y%m%d_%H%M") + "_" + name + ".log")
),
logging.StreamHandler()
])
| 37 | 70 |
# MIT License
# Copyright (c) 2020 Simon Schug, João Sacramento
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
import time
from importlib import reload
import torch
# Global variables
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
log_dir = None
log_name = None
writer = None
def setup_logging(name, dir=""):
"""
Setup the logging device to log into a uniquely created directory.
Args:
name: Name of the directory for the log-files.
dir: Optional sub-directory within log
"""
# Setup global log name and directory
global log_name
log_name = name
# Setup global logging directory
global log_dir
log_dir = os.path.join("log", dir)
# Create the logging folder if it does not exist already
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
# Need to reload logging as otherwise the logger might be captured by another library
reload(logging)
# Setup global logger
logging.basicConfig(
level=logging.INFO,
format="[%(levelname)-5.5s %(asctime)s] %(message)s",
datefmt='%H:%M:%S',
handlers=[
logging.FileHandler(os.path.join(
log_dir, time.strftime("%Y%m%d_%H%M") + "_" + name + ".log")
),
logging.StreamHandler()
])
|
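A short usage sketch, assuming the file above is importable as a module named `utils`; the run name and sub-directory are placeholders.
import logging
import utils  # assumed module name for the logging helper above
utils.setup_logging(name="mnist_run", dir="experiments")
# Messages now go to log/experiments/<YYYYmmdd_HHMM>_mnist_run.log and to the console.
logging.info("Using device: %s", utils.device)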
__init__
|
        Initializes an observation in the light-dark domain.
Args:
position (tuple): position of the robot.
|
import pomdp_py
class Observation(pomdp_py.Observation):
"""Defines the Observation for the continuous light-dark domain;
Observation space:
:math:`\Omega\subseteq\mathbb{R}^2` the observation of the robot is
an estimate of the robot position :math:`g(x_t)\in\Omega`.
"""
    # the number of decimal places used to round an observation's position.
PRECISION=2
# MASKED: __init__ function (lines 15-29)
def discretize(self):
return Observation(self.position, discrete=True)
def __hash__(self):
return hash(self.position)
def __eq__(self, other):
if isinstance(other, Observation):
return self.position == other.position
else:
return False
def __str__(self):
return self.__repr__()
def __repr__(self):
return "Observation(%s)" % (str(self.position))
|
def __init__(self, position, discrete=False):
"""
        Initializes an observation in the light-dark domain.
Args:
position (tuple): position of the robot.
"""
self._discrete = discrete
if len(position) != 2:
raise ValueError("Observation position must be a vector of length 2")
if self._discrete:
self.position = position
else:
self.position = (round(position[0], Observation.PRECISION),
round(position[1], Observation.PRECISION))
| 15 | 29 |
import pomdp_py
class Observation(pomdp_py.Observation):
"""Defines the Observation for the continuous light-dark domain;
Observation space:
:math:`\Omega\subseteq\mathbb{R}^2` the observation of the robot is
an estimate of the robot position :math:`g(x_t)\in\Omega`.
"""
    # the number of decimal places used to round an observation's position.
PRECISION=2
def __init__(self, position, discrete=False):
"""
        Initializes an observation in the light-dark domain.
Args:
position (tuple): position of the robot.
"""
self._discrete = discrete
if len(position) != 2:
raise ValueError("Observation position must be a vector of length 2")
if self._discrete:
self.position = position
else:
self.position = (round(position[0], Observation.PRECISION),
round(position[1], Observation.PRECISION))
def discretize(self):
return Observation(self.position, discrete=True)
def __hash__(self):
return hash(self.position)
def __eq__(self, other):
if isinstance(other, Observation):
return self.position == other.position
else:
return False
def __str__(self):
return self.__repr__()
def __repr__(self):
return "Observation(%s)" % (str(self.position))
|
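A small usage sketch for the Observation class above, showing the 2-decimal rounding of continuous observations and the value-based equality it enables; the coordinates are arbitrary and the import path is a placeholder.
from light_dark.domain.observation import Observation  # assumed import path
o1 = Observation((2.123456, -0.987654))    # continuous: position rounded to 2 decimals
o2 = Observation((2.12, -0.99))
print(o1)                                  # Observation((2.12, -0.99))
print(o1 == o2, hash(o1) == hash(o2))      # True True
o3 = o1.discretize()                       # same rounded position, flagged as discrete
print(o3 == o1)                            # True (equality only compares positions)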
_find_x12
|
If x12path is not given, then either x13as[.exe] or x12a[.exe] must
be found on the PATH. Otherwise, the environmental variable X12PATH or
X13PATH must be defined. If prefer_x13 is True, only X13PATH is searched
for. If it is false, only X12PATH is searched for.
|
"""
Run x12/x13-arima specs in a subprocess from Python and curry results back
into python.
Notes
-----
Many of the functions are called x12. However, they are also intended to work
for x13. If this is not the case, it's a bug.
"""
import os
import subprocess
import tempfile
import re
from warnings import warn
import pandas as pd
from statsmodels.compat.python import iteritems
from statsmodels.tools.tools import Bunch
from statsmodels.tools.sm_exceptions import (X13NotFoundError,
IOWarning, X13Error,
X13Warning)
__all__ = ["x13_arima_select_order", "x13_arima_analysis"]
_binary_names = ('x13as.exe', 'x13as', 'x12a.exe', 'x12a')
class _freq_to_period:
def __getitem__(self, key):
if key.startswith('M'):
return 12
elif key.startswith('Q'):
return 4
elif key.startswith('W'):
return 52
_freq_to_period = _freq_to_period()
_period_to_freq = {12: 'M', 4: 'Q'}
_log_to_x12 = {True: 'log', False: 'none', None: 'auto'}
_bool_to_yes_no = lambda x: 'yes' if x else 'no' # noqa:E731
# MASKED: _find_x12 function (lines 46-79)
def _check_x12(x12path=None):
x12path = _find_x12(x12path)
if not x12path:
raise X13NotFoundError("x12a and x13as not found on path. Give the "
"path, put them on PATH, or set the "
"X12PATH or X13PATH environmental variable.")
return x12path
def _clean_order(order):
"""
    Takes something like (1 1 0)(0 1 1) and returns an arma order, sarma
    order tuple. Also accepts (1 1 0) and returns the arma order and (0, 0, 0)
"""
order = re.findall(r"\([0-9 ]*?\)", order)
def clean(x):
return tuple(map(int, re.sub("[()]", "", x).split(" ")))
if len(order) > 1:
order, sorder = map(clean, order)
else:
order = clean(order[0])
sorder = (0, 0, 0)
return order, sorder
def run_spec(x12path, specpath, outname=None, meta=False, datameta=False):
if meta and datameta:
raise ValueError("Cannot specify both meta and datameta.")
if meta:
args = [x12path, "-m " + specpath]
elif datameta:
args = [x12path, "-d " + specpath]
else:
args = [x12path, specpath]
if outname:
args += [outname]
return subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def _make_automdl_options(maxorder, maxdiff, diff):
options = "\n"
options += "maxorder = ({0} {1})\n".format(maxorder[0], maxorder[1])
if maxdiff is not None: # maxdiff always takes precedence
options += "maxdiff = ({0} {1})\n".format(maxdiff[0], maxdiff[1])
else:
options += "diff = ({0} {1})\n".format(diff[0], diff[1])
return options
def _make_var_names(exog):
if hasattr(exog, "name"):
var_names = exog.name
elif hasattr(exog, "columns"):
var_names = exog.columns
else:
raise ValueError("exog is not a Series or DataFrame or is unnamed.")
try:
var_names = " ".join(var_names)
except TypeError: # cannot have names that are numbers, pandas default
from statsmodels.base.data import _make_exog_names
if exog.ndim == 1:
var_names = "x1"
else:
var_names = " ".join(_make_exog_names(exog))
return var_names
def _make_regression_options(trading, exog):
if not trading and exog is None: # start regression spec
return ""
reg_spec = "regression{\n"
if trading:
reg_spec += " variables = (td)\n"
if exog is not None:
var_names = _make_var_names(exog)
reg_spec += " user = ({0})\n".format(var_names)
reg_spec += " data = ({0})\n".format("\n".join(map(str,
exog.values.ravel().tolist())))
reg_spec += "}\n" # close out regression spec
return reg_spec
def _make_forecast_options(forecast_years):
if forecast_years is None:
return ""
forecast_spec = "forecast{\n"
forecast_spec += "maxlead = ({0})\n}}\n".format(forecast_years)
return forecast_spec
def _check_errors(errors):
errors = errors[errors.find("spc:")+4:].strip()
if errors and 'ERROR' in errors:
raise X13Error(errors)
elif errors and 'WARNING' in errors:
warn(errors, X13Warning)
def _convert_out_to_series(x, dates, name):
"""
    Convert x to a Series where x is a string in the format given by
x-13arima-seats output.
"""
from io import StringIO
from pandas import read_csv
out = read_csv(StringIO(x), skiprows=2,
header=None, sep='\t', engine='python')
return out.set_index(dates).rename(columns={1: name})[name]
def _open_and_read(fname):
    # opens a file, reads it, and makes sure it's closed
with open(fname, 'r') as fin:
fout = fin.read()
return fout
class Spec(object):
@property
def spec_name(self):
return self.__class__.__name__.replace("Spec", "")
def create_spec(self, **kwargs):
spec = """{name} {{
{options}
}}
"""
return spec.format(name=self.spec_name,
options=self.options)
def set_options(self, **kwargs):
options = ""
for key, value in iteritems(kwargs):
options += "{0}={1}\n".format(key, value)
self.__dict__.update({key: value})
self.options = options
class SeriesSpec(Spec):
"""
Parameters
----------
data
appendbcst : bool
appendfcst : bool
comptype
compwt
decimals
modelspan
name
period
precision
to_print
to_save
span
start
title
type
Notes
-----
Rarely used arguments
divpower
missingcode
missingval
saveprecision
trimzero
"""
def __init__(self, data, name='Unnamed Series', appendbcst=False,
appendfcst=False,
comptype=None, compwt=1, decimals=0, modelspan=(),
period=12, precision=0, to_print=[], to_save=[], span=(),
start=(1, 1), title='', series_type=None, divpower=None,
missingcode=-99999, missingval=1000000000):
appendbcst, appendfcst = map(_bool_to_yes_no, [appendbcst,
appendfcst,
])
series_name = "\"{0}\"".format(name[:64]) # trim to 64 characters
title = "\"{0}\"".format(title[:79]) # trim to 79 characters
self.set_options(data=data, appendbcst=appendbcst,
appendfcst=appendfcst, period=period, start=start,
title=title, name=series_name,
)
def pandas_to_series_spec(x):
# from statsmodels.tools.data import _check_period_index
# check_period_index(x)
if hasattr(x, 'columns'): # convert to series
if len(x.columns) > 1:
raise ValueError("Does not handle DataFrame with more than one "
"column")
x = x[x.columns[0]]
data = "({0})".format("\n".join(map(str, x.values.tolist())))
# get periodicity
# get start / first data
# give it a title
try:
period = _freq_to_period[x.index.freqstr]
except (AttributeError, ValueError):
from pandas.tseries.api import infer_freq
period = _freq_to_period[infer_freq(x.index)]
start_date = x.index[0]
if period == 12:
year, stperiod = start_date.year, start_date.month
elif period == 4:
year, stperiod = start_date.year, start_date.quarter
else: # pragma: no cover
raise ValueError("Only monthly and quarterly periods are supported."
" Please report or send a pull request if you want "
"this extended.")
if hasattr(x, 'name'):
name = x.name or "Unnamed Series"
else:
name = 'Unnamed Series'
series_spec = SeriesSpec(data=data, name=name, period=period,
title=name, start="{0}.{1}".format(year,
stperiod))
return series_spec
def x13_arima_analysis(endog, maxorder=(2, 1), maxdiff=(2, 1), diff=None,
exog=None, log=None, outlier=True, trading=False,
forecast_years=None, retspec=False,
speconly=False, start=None, freq=None,
print_stdout=False, x12path=None, prefer_x13=True):
"""
Perform x13-arima analysis for monthly or quarterly data.
Parameters
----------
endog : array_like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array_like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_years : int
Number of forecasts produced. The default is one year.
retspec : bool
Whether to return the created specification file. Can be useful for
debugging.
speconly : bool
Whether to create the specification file and then return it without
performing the analysis. Can be useful for debugging.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
        Must be given if ``endog`` does not have date information in its
index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or
X12PATH depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
Returns
-------
res : Bunch
A bunch object with the following attributes:
- results : str
The full output from the X12/X13 run.
- seasadj : pandas.Series
The final seasonally adjusted ``endog``
- trend : pandas.Series
The trend-cycle component of ``endog``
- irregular : pandas.Series
The final irregular component of ``endog``
- stdout : str
The captured stdout produced by x12/x13.
- spec : str, optional
Returned if ``retspec`` is True. The only thing returned if
``speconly`` is True.
Notes
-----
This works by creating a specification file, writing it to a temporary
directory, invoking X12/X13 in a subprocess, and reading the output
back in.
"""
x12path = _check_x12(x12path)
if not isinstance(endog, (pd.DataFrame, pd.Series)):
if start is None or freq is None:
raise ValueError("start and freq cannot be none if endog is not "
"a pandas object")
endog = pd.Series(endog, index=pd.DatetimeIndex(start=start,
periods=len(endog),
freq=freq))
spec_obj = pandas_to_series_spec(endog)
spec = spec_obj.create_spec()
spec += "transform{{function={0}}}\n".format(_log_to_x12[log])
if outlier:
spec += "outlier{}\n"
options = _make_automdl_options(maxorder, maxdiff, diff)
spec += "automdl{{{0}}}\n".format(options)
spec += _make_regression_options(trading, exog)
spec += _make_forecast_options(forecast_years)
spec += "x11{ save=(d11 d12 d13) }"
if speconly:
return spec
# write it to a tempfile
# TODO: make this more robust - give the user some control?
ftempin = tempfile.NamedTemporaryFile(delete=False, suffix='.spc')
ftempout = tempfile.NamedTemporaryFile(delete=False)
try:
ftempin.write(spec.encode('utf8'))
ftempin.close()
ftempout.close()
# call x12 arima
p = run_spec(x12path, ftempin.name[:-4], ftempout.name)
p.wait()
stdout = p.stdout.read()
if print_stdout:
            print(stdout)
# check for errors
errors = _open_and_read(ftempout.name + '.err')
_check_errors(errors)
# read in results
results = _open_and_read(ftempout.name + '.out')
seasadj = _open_and_read(ftempout.name + '.d11')
trend = _open_and_read(ftempout.name + '.d12')
irregular = _open_and_read(ftempout.name + '.d13')
finally:
try: # sometimes this gives a permission denied error?
# not sure why. no process should have these open
os.remove(ftempin.name)
os.remove(ftempout.name)
except OSError:
if os.path.exists(ftempin.name):
warn("Failed to delete resource {0}".format(ftempin.name),
IOWarning)
if os.path.exists(ftempout.name):
warn("Failed to delete resource {0}".format(ftempout.name),
IOWarning)
seasadj = _convert_out_to_series(seasadj, endog.index, 'seasadj')
trend = _convert_out_to_series(trend, endog.index, 'trend')
irregular = _convert_out_to_series(irregular, endog.index, 'irregular')
# NOTE: there is not likely anything in stdout that's not in results
# so may be safe to just suppress and remove it
if not retspec:
res = X13ArimaAnalysisResult(observed=endog, results=results,
seasadj=seasadj, trend=trend,
irregular=irregular, stdout=stdout)
else:
res = X13ArimaAnalysisResult(observed=endog, results=results,
seasadj=seasadj, trend=trend,
irregular=irregular, stdout=stdout,
spec=spec)
return res
def x13_arima_select_order(endog, maxorder=(2, 1), maxdiff=(2, 1), diff=None,
exog=None, log=None, outlier=True, trading=False,
forecast_years=None,
start=None, freq=None, print_stdout=False,
x12path=None, prefer_x13=True):
"""
Perform automatic seasonal ARIMA order identification using x12/x13 ARIMA.
Parameters
----------
endog : array_like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array_like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_years : int
Number of forecasts produced. The default is one year.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
        Must be given if ``endog`` does not have date information in its
index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or X12PATH
depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
Returns
-------
results : Bunch
A bunch object that has the following attributes:
- order : tuple
The regular order
- sorder : tuple
The seasonal order
- include_mean : bool
Whether to include a mean or not
- results : str
The full results from the X12/X13 analysis
- stdout : str
The captured stdout from the X12/X13 analysis
Notes
-----
This works by creating a specification file, writing it to a temporary
directory, invoking X12/X13 in a subprocess, and reading the output back
in.
"""
results = x13_arima_analysis(endog, x12path=x12path, exog=exog, log=log,
outlier=outlier, trading=trading,
forecast_years=forecast_years,
maxorder=maxorder, maxdiff=maxdiff, diff=diff,
start=start, freq=freq, prefer_x13=prefer_x13)
model = re.search("(?<=Final automatic model choice : ).*",
results.results)
order = model.group()
if re.search("Mean is not significant", results.results):
include_mean = False
elif re.search("Constant", results.results):
include_mean = True
else:
include_mean = False
order, sorder = _clean_order(order)
res = Bunch(order=order, sorder=sorder, include_mean=include_mean,
results=results.results, stdout=results.stdout)
return res
class X13ArimaAnalysisResult(object):
def __init__(self, **kwargs):
for key, value in iteritems(kwargs):
setattr(self, key, value)
def plot(self):
from statsmodels.graphics.utils import _import_mpl
plt = _import_mpl()
fig, axes = plt.subplots(4, 1, sharex=True)
self.observed.plot(ax=axes[0], legend=False)
axes[0].set_ylabel('Observed')
self.seasadj.plot(ax=axes[1], legend=False)
axes[1].set_ylabel('Seas. Adjusted')
self.trend.plot(ax=axes[2], legend=False)
axes[2].set_ylabel('Trend')
self.irregular.plot(ax=axes[3], legend=False)
axes[3].set_ylabel('Irregular')
fig.tight_layout()
return fig
|
def _find_x12(x12path=None, prefer_x13=True):
"""
If x12path is not given, then either x13as[.exe] or x12a[.exe] must
be found on the PATH. Otherwise, the environmental variable X12PATH or
X13PATH must be defined. If prefer_x13 is True, only X13PATH is searched
for. If it is false, only X12PATH is searched for.
"""
global _binary_names
if x12path is not None and x12path.endswith(_binary_names):
# remove binary from path if given
x12path = os.path.dirname(x12path)
if not prefer_x13: # search for x12 first
_binary_names = _binary_names[::-1]
if x12path is None:
x12path = os.getenv("X12PATH", "")
if not x12path:
x12path = os.getenv("X13PATH", "")
elif x12path is None:
x12path = os.getenv("X13PATH", "")
if not x12path:
x12path = os.getenv("X12PATH", "")
for binary in _binary_names:
x12 = os.path.join(x12path, binary)
try:
subprocess.check_call(x12, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return x12
except OSError:
pass
else:
return False
| 46 | 79 |
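A hedged sketch of how the lookup described in the `_find_x12` docstring is typically satisfied from user code: either the x13as/x12a binary is on PATH, or X13PATH/X12PATH points at its directory. The directory below is a placeholder, and the analysis call is left commented out because it needs a real binary and a real monthly series.
import os
os.environ["X13PATH"] = "/opt/x13as"   # placeholder: directory containing x13as(.exe)
from statsmodels.tsa.x13 import x13_arima_select_order
# With the binary resolvable, the public entry points call _check_x12() internally:
# res = x13_arima_select_order(monthly_series)   # monthly_series: pandas Series with a DatetimeIndex
# print(res.order, res.sorder, res.include_mean)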
"""
Run x12/x13-arima specs in a subprocess from Python and curry results back
into python.
Notes
-----
Many of the functions are called x12. However, they are also intended to work
for x13. If this is not the case, it's a bug.
"""
import os
import subprocess
import tempfile
import re
from warnings import warn
import pandas as pd
from statsmodels.compat.python import iteritems
from statsmodels.tools.tools import Bunch
from statsmodels.tools.sm_exceptions import (X13NotFoundError,
IOWarning, X13Error,
X13Warning)
__all__ = ["x13_arima_select_order", "x13_arima_analysis"]
_binary_names = ('x13as.exe', 'x13as', 'x12a.exe', 'x12a')
class _freq_to_period:
def __getitem__(self, key):
if key.startswith('M'):
return 12
elif key.startswith('Q'):
return 4
elif key.startswith('W'):
return 52
_freq_to_period = _freq_to_period()
_period_to_freq = {12: 'M', 4: 'Q'}
_log_to_x12 = {True: 'log', False: 'none', None: 'auto'}
_bool_to_yes_no = lambda x: 'yes' if x else 'no' # noqa:E731
def _find_x12(x12path=None, prefer_x13=True):
"""
If x12path is not given, then either x13as[.exe] or x12a[.exe] must
be found on the PATH. Otherwise, the environmental variable X12PATH or
X13PATH must be defined. If prefer_x13 is True, only X13PATH is searched
for. If it is false, only X12PATH is searched for.
"""
global _binary_names
if x12path is not None and x12path.endswith(_binary_names):
# remove binary from path if given
x12path = os.path.dirname(x12path)
if not prefer_x13: # search for x12 first
_binary_names = _binary_names[::-1]
if x12path is None:
x12path = os.getenv("X12PATH", "")
if not x12path:
x12path = os.getenv("X13PATH", "")
elif x12path is None:
x12path = os.getenv("X13PATH", "")
if not x12path:
x12path = os.getenv("X12PATH", "")
for binary in _binary_names:
x12 = os.path.join(x12path, binary)
try:
subprocess.check_call(x12, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return x12
except OSError:
pass
else:
return False
def _check_x12(x12path=None):
x12path = _find_x12(x12path)
if not x12path:
raise X13NotFoundError("x12a and x13as not found on path. Give the "
"path, put them on PATH, or set the "
"X12PATH or X13PATH environmental variable.")
return x12path
def _clean_order(order):
"""
    Takes something like (1 1 0)(0 1 1) and returns an arma order, sarma
    order tuple. Also accepts (1 1 0) and returns the arma order and (0, 0, 0)
"""
order = re.findall(r"\([0-9 ]*?\)", order)
def clean(x):
return tuple(map(int, re.sub("[()]", "", x).split(" ")))
if len(order) > 1:
order, sorder = map(clean, order)
else:
order = clean(order[0])
sorder = (0, 0, 0)
return order, sorder
def run_spec(x12path, specpath, outname=None, meta=False, datameta=False):
if meta and datameta:
raise ValueError("Cannot specify both meta and datameta.")
if meta:
args = [x12path, "-m " + specpath]
elif datameta:
args = [x12path, "-d " + specpath]
else:
args = [x12path, specpath]
if outname:
args += [outname]
return subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def _make_automdl_options(maxorder, maxdiff, diff):
options = "\n"
options += "maxorder = ({0} {1})\n".format(maxorder[0], maxorder[1])
if maxdiff is not None: # maxdiff always takes precedence
options += "maxdiff = ({0} {1})\n".format(maxdiff[0], maxdiff[1])
else:
options += "diff = ({0} {1})\n".format(diff[0], diff[1])
return options
def _make_var_names(exog):
if hasattr(exog, "name"):
var_names = exog.name
elif hasattr(exog, "columns"):
var_names = exog.columns
else:
raise ValueError("exog is not a Series or DataFrame or is unnamed.")
try:
var_names = " ".join(var_names)
except TypeError: # cannot have names that are numbers, pandas default
from statsmodels.base.data import _make_exog_names
if exog.ndim == 1:
var_names = "x1"
else:
var_names = " ".join(_make_exog_names(exog))
return var_names
def _make_regression_options(trading, exog):
if not trading and exog is None: # start regression spec
return ""
reg_spec = "regression{\n"
if trading:
reg_spec += " variables = (td)\n"
if exog is not None:
var_names = _make_var_names(exog)
reg_spec += " user = ({0})\n".format(var_names)
reg_spec += " data = ({0})\n".format("\n".join(map(str,
exog.values.ravel().tolist())))
reg_spec += "}\n" # close out regression spec
return reg_spec
def _make_forecast_options(forecast_years):
if forecast_years is None:
return ""
forecast_spec = "forecast{\n"
forecast_spec += "maxlead = ({0})\n}}\n".format(forecast_years)
return forecast_spec
def _check_errors(errors):
errors = errors[errors.find("spc:")+4:].strip()
if errors and 'ERROR' in errors:
raise X13Error(errors)
elif errors and 'WARNING' in errors:
warn(errors, X13Warning)
def _convert_out_to_series(x, dates, name):
"""
    Convert x to a Series where x is a string in the format given by
x-13arima-seats output.
"""
from io import StringIO
from pandas import read_csv
out = read_csv(StringIO(x), skiprows=2,
header=None, sep='\t', engine='python')
return out.set_index(dates).rename(columns={1: name})[name]
def _open_and_read(fname):
    # opens a file, reads it, and makes sure it's closed
with open(fname, 'r') as fin:
fout = fin.read()
return fout
class Spec(object):
@property
def spec_name(self):
return self.__class__.__name__.replace("Spec", "")
def create_spec(self, **kwargs):
spec = """{name} {{
{options}
}}
"""
return spec.format(name=self.spec_name,
options=self.options)
def set_options(self, **kwargs):
options = ""
for key, value in iteritems(kwargs):
options += "{0}={1}\n".format(key, value)
self.__dict__.update({key: value})
self.options = options
class SeriesSpec(Spec):
"""
Parameters
----------
data
appendbcst : bool
appendfcst : bool
comptype
compwt
decimals
modelspan
name
period
precision
to_print
to_save
span
start
title
type
Notes
-----
Rarely used arguments
divpower
missingcode
missingval
saveprecision
trimzero
"""
def __init__(self, data, name='Unnamed Series', appendbcst=False,
appendfcst=False,
comptype=None, compwt=1, decimals=0, modelspan=(),
period=12, precision=0, to_print=[], to_save=[], span=(),
start=(1, 1), title='', series_type=None, divpower=None,
missingcode=-99999, missingval=1000000000):
appendbcst, appendfcst = map(_bool_to_yes_no, [appendbcst,
appendfcst,
])
series_name = "\"{0}\"".format(name[:64]) # trim to 64 characters
title = "\"{0}\"".format(title[:79]) # trim to 79 characters
self.set_options(data=data, appendbcst=appendbcst,
appendfcst=appendfcst, period=period, start=start,
title=title, name=series_name,
)
def pandas_to_series_spec(x):
# from statsmodels.tools.data import _check_period_index
# check_period_index(x)
if hasattr(x, 'columns'): # convert to series
if len(x.columns) > 1:
raise ValueError("Does not handle DataFrame with more than one "
"column")
x = x[x.columns[0]]
data = "({0})".format("\n".join(map(str, x.values.tolist())))
# get periodicity
# get start / first data
# give it a title
try:
period = _freq_to_period[x.index.freqstr]
except (AttributeError, ValueError):
from pandas.tseries.api import infer_freq
period = _freq_to_period[infer_freq(x.index)]
start_date = x.index[0]
if period == 12:
year, stperiod = start_date.year, start_date.month
elif period == 4:
year, stperiod = start_date.year, start_date.quarter
else: # pragma: no cover
raise ValueError("Only monthly and quarterly periods are supported."
" Please report or send a pull request if you want "
"this extended.")
if hasattr(x, 'name'):
name = x.name or "Unnamed Series"
else:
name = 'Unnamed Series'
series_spec = SeriesSpec(data=data, name=name, period=period,
title=name, start="{0}.{1}".format(year,
stperiod))
return series_spec
def x13_arima_analysis(endog, maxorder=(2, 1), maxdiff=(2, 1), diff=None,
exog=None, log=None, outlier=True, trading=False,
forecast_years=None, retspec=False,
speconly=False, start=None, freq=None,
print_stdout=False, x12path=None, prefer_x13=True):
"""
Perform x13-arima analysis for monthly or quarterly data.
Parameters
----------
endog : array_like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array_like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_years : int
Number of forecasts produced. The default is one year.
retspec : bool
Whether to return the created specification file. Can be useful for
debugging.
speconly : bool
Whether to create the specification file and then return it without
performing the analysis. Can be useful for debugging.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
        Must be given if ``endog`` does not have date information in its
index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or
X12PATH depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
Returns
-------
res : Bunch
A bunch object with the following attributes:
- results : str
The full output from the X12/X13 run.
- seasadj : pandas.Series
The final seasonally adjusted ``endog``
- trend : pandas.Series
The trend-cycle component of ``endog``
- irregular : pandas.Series
The final irregular component of ``endog``
- stdout : str
The captured stdout produced by x12/x13.
- spec : str, optional
Returned if ``retspec`` is True. The only thing returned if
``speconly`` is True.
Notes
-----
This works by creating a specification file, writing it to a temporary
directory, invoking X12/X13 in a subprocess, and reading the output
back in.
"""
x12path = _check_x12(x12path)
if not isinstance(endog, (pd.DataFrame, pd.Series)):
if start is None or freq is None:
raise ValueError("start and freq cannot be none if endog is not "
"a pandas object")
endog = pd.Series(endog, index=pd.DatetimeIndex(start=start,
periods=len(endog),
freq=freq))
spec_obj = pandas_to_series_spec(endog)
spec = spec_obj.create_spec()
spec += "transform{{function={0}}}\n".format(_log_to_x12[log])
if outlier:
spec += "outlier{}\n"
options = _make_automdl_options(maxorder, maxdiff, diff)
spec += "automdl{{{0}}}\n".format(options)
spec += _make_regression_options(trading, exog)
spec += _make_forecast_options(forecast_years)
spec += "x11{ save=(d11 d12 d13) }"
if speconly:
return spec
# write it to a tempfile
# TODO: make this more robust - give the user some control?
ftempin = tempfile.NamedTemporaryFile(delete=False, suffix='.spc')
ftempout = tempfile.NamedTemporaryFile(delete=False)
try:
ftempin.write(spec.encode('utf8'))
ftempin.close()
ftempout.close()
# call x12 arima
p = run_spec(x12path, ftempin.name[:-4], ftempout.name)
p.wait()
        stdout = p.stdout.read()
        if print_stdout:
            print(stdout)  # stdout was already consumed above; print the captured copy
# check for errors
errors = _open_and_read(ftempout.name + '.err')
_check_errors(errors)
# read in results
results = _open_and_read(ftempout.name + '.out')
seasadj = _open_and_read(ftempout.name + '.d11')
trend = _open_and_read(ftempout.name + '.d12')
irregular = _open_and_read(ftempout.name + '.d13')
finally:
try: # sometimes this gives a permission denied error?
# not sure why. no process should have these open
os.remove(ftempin.name)
os.remove(ftempout.name)
except OSError:
if os.path.exists(ftempin.name):
warn("Failed to delete resource {0}".format(ftempin.name),
IOWarning)
if os.path.exists(ftempout.name):
warn("Failed to delete resource {0}".format(ftempout.name),
IOWarning)
seasadj = _convert_out_to_series(seasadj, endog.index, 'seasadj')
trend = _convert_out_to_series(trend, endog.index, 'trend')
irregular = _convert_out_to_series(irregular, endog.index, 'irregular')
# NOTE: there is not likely anything in stdout that's not in results
# so may be safe to just suppress and remove it
if not retspec:
res = X13ArimaAnalysisResult(observed=endog, results=results,
seasadj=seasadj, trend=trend,
irregular=irregular, stdout=stdout)
else:
res = X13ArimaAnalysisResult(observed=endog, results=results,
seasadj=seasadj, trend=trend,
irregular=irregular, stdout=stdout,
spec=spec)
return res
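# --- Illustrative usage sketch (added for clarity, not part of the original source) ---
# Hedged example of running the full analysis. It assumes an x13as or x12a
# binary is discoverable (PATH, X13PATH or X12PATH) and that ``data`` is a
# monthly or quarterly pandas Series with a proper DatetimeIndex.
def _example_x13_arima_analysis(data):
    res = x13_arima_analysis(data, outlier=True, trading=False)
    # attribute names follow the Bunch documented above
    return res.seasadj, res.trend, res.irregular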
def x13_arima_select_order(endog, maxorder=(2, 1), maxdiff=(2, 1), diff=None,
exog=None, log=None, outlier=True, trading=False,
forecast_years=None,
start=None, freq=None, print_stdout=False,
x12path=None, prefer_x13=True):
"""
Perform automatic seasonal ARIMA order identification using x12/x13 ARIMA.
Parameters
----------
endog : array_like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array_like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_years : int
Number of forecasts produced. The default is one year.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
        Must be given if ``endog`` does not have date information in its
index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or X12PATH
depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
Returns
-------
results : Bunch
A bunch object that has the following attributes:
- order : tuple
The regular order
- sorder : tuple
The seasonal order
- include_mean : bool
Whether to include a mean or not
- results : str
The full results from the X12/X13 analysis
- stdout : str
The captured stdout from the X12/X13 analysis
Notes
-----
This works by creating a specification file, writing it to a temporary
directory, invoking X12/X13 in a subprocess, and reading the output back
in.
"""
results = x13_arima_analysis(endog, x12path=x12path, exog=exog, log=log,
outlier=outlier, trading=trading,
forecast_years=forecast_years,
maxorder=maxorder, maxdiff=maxdiff, diff=diff,
start=start, freq=freq, prefer_x13=prefer_x13)
model = re.search("(?<=Final automatic model choice : ).*",
results.results)
order = model.group()
if re.search("Mean is not significant", results.results):
include_mean = False
elif re.search("Constant", results.results):
include_mean = True
else:
include_mean = False
order, sorder = _clean_order(order)
res = Bunch(order=order, sorder=sorder, include_mean=include_mean,
results=results.results, stdout=results.stdout)
return res
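# --- Illustrative usage sketch (added for clarity, not part of the original source) ---
# Reading the automatically selected ARIMA orders; same prerequisites as
# x13_arima_analysis (X12/X13 binary available, dated pandas Series input).
def _example_x13_arima_select_order(data):
    sel = x13_arima_select_order(data)
    # e.g. sel.order == (0, 1, 1) and sel.sorder == (0, 1, 1) for airline-like data
    return sel.order, sel.sorder, sel.include_mean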
class X13ArimaAnalysisResult(object):
def __init__(self, **kwargs):
for key, value in iteritems(kwargs):
setattr(self, key, value)
def plot(self):
from statsmodels.graphics.utils import _import_mpl
plt = _import_mpl()
fig, axes = plt.subplots(4, 1, sharex=True)
self.observed.plot(ax=axes[0], legend=False)
axes[0].set_ylabel('Observed')
self.seasadj.plot(ax=axes[1], legend=False)
axes[1].set_ylabel('Seas. Adjusted')
self.trend.plot(ax=axes[2], legend=False)
axes[2].set_ylabel('Trend')
self.irregular.plot(ax=axes[3], legend=False)
axes[3].set_ylabel('Irregular')
fig.tight_layout()
return fig
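# --- Illustrative usage sketch (added for clarity, not part of the original source) ---
# Plotting the four components held by X13ArimaAnalysisResult; matplotlib is
# imported lazily inside plot(), so it must be installed. The output file name
# is arbitrary.
def _example_plot_components(data):
    res = x13_arima_analysis(data)
    fig = res.plot()  # observed, seasonally adjusted, trend and irregular panels
    fig.savefig("x13_components.png")
    return fig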
|
corpus_reader
|
Lê as extensões dos arquivos .xml no caminho especificado como path e
retorna uma tupla com duas listas.Uma lista contém os paths para os arquivos
.xml e a outra contém os arquivos Document gerados para aquele arquilo .xml
|
# -*- coding: utf-8 -*-
"""
Criado por Lucas Fonseca Lage em 04/03/2020
"""
import re, os, spacy
import numpy as np
from my_wsd import my_lesk
from unicodedata import normalize
from document import Document
from gensim.models import Phrases
# Assumed missing import: ``wn`` is used below in synset_count and hypo_hyper_count;
# NLTK's WordNet interface matches the wn.synsets(..., lang='por') calls.
from nltk.corpus import wordnet as wn
# Carregamento do modelo Spacy
nlp = spacy.load('pt_core_news_lg')
# Carregamento dos modelos de bigramas e trigramas
#bigram_model = Phrases.load('./n_gram_models/bigram_gen_model')
#trigram_model = Phrases.load('./n_gram_models/trigram_gen_model')
freq_pos_tag = [('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('VERB', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET'),
('DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN'),
('NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADJ', 'PUNCT')]
# MASKED: corpus_reader function (lines 31-49)
def corpus_yeeter(path):
'''Similar ao corpus_reader. Recebe um caminho para a pasta contendo o
corpus e cria um generator. Cada iteração retorna uma tupla contendo um
caminho para o arquivo .xml e o objeto Document criado a partir do mesmo
'''
prog = re.compile('(\.xml)$')
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
if re.search(prog,filename):
path = os.path.normpath(os.path.join(dirpath,filename))
yield (path, Document(path))
def all_fps(path_to_dir):
'''Recebe o caminho para o diretório e retorna uma lista com os caminhos
absolutos para os arquivos que estão nele
'''
fps = []
for dirpath, dirnames, filenames in os.walk(path_to_dir):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
return fps
def remover_acentos(text):
'''Remove os acentos da string "text". Usada somente na função pre_process
'''
return normalize('NFKD', text).encode('ASCII', 'ignore').decode('ASCII')
def pre_process(text):
'''Realiza um pré processamento da string de entrada "text".
Retira espaços em branco extras e retira caracteres não alfanuméricos
'''
text = re.sub('\s{2,}',' ',text).strip().lower()
doc = nlp(text)
#Retira numeros
text = ' '.join([token.text for token in doc if token.is_alpha == True
and token.pos_ != 'PUNCT'])
return remover_acentos(text)
def bi_trigram_counter(sentence_list):
"""Retorna uma tupla com o numero de bigramas e trigramas.
Recebe como entrada o texto segmentado em uma lista de sentencas.
"""
bi_sent_list = []
tri_sent_list = []
for sentence in sentence_list:
proc_sent = pre_process(sentence).lower().split()
bigram_sentence = bigram_model[proc_sent]
bi_sent_list.append(bigram_sentence)
for bi_sent in bi_sent_list:
tri_sent = trigram_model[bi_sent]
tri_sent_list.append(tri_sent)
return(bigram_number(bi_sent_list),trigram_number(tri_sent_list))
def bigram_number(bigram_sent_list):
'''Conta o número de bigramas encontrados na redação. Recebe uma lista de
sentenças que configuram a redação.
'''
count = 0
for sent in bigram_sent_list:
for token in sent:
if re.search(u'_',token):
count += 1
return count
def trigram_number(trigram_sent_list):
'''Conta o número de trigramas encontrados na redação. Recebe uma lista de
sentenças que configuram a redação
'''
count = 0
for sent in trigram_sent_list:
for token in sent:
if re.search('(?<=_).+_',token):
count += 1
return count
def n_most_freq_pos_tag_seq(sent_list):
''' Procura na lista de sentenças a sequências de pos_tag mais frequentes e
retorna a quantidade encontrada.
'''
n = 0
pos_list = []
for i in sent_list:
sent_nlp = nlp(i)
sent_pos = []
for token in sent_nlp:
sent_pos.append(token.pos_)
pos_list.append(sent_pos)
for line in pos_list:
if len(line) < 7:
continue
if len(line) >= 7:
while len(line) >= 7:
t = tuple(line[0:7])
if t in freq_pos_tag:
n+=1
line.pop(0)
return n
def subj_n_elements(sentence_list):
''' Recebe a lista de sentenças da redação. Conta a quantidade de elementos
abaixo do sujeito na árvore sintática gerada pelo "dependecy parser" do
Spacy. Retorna o número de sujeitos que possuem uma quantidade de elementos
maior que 7 e também o número total de elementos que fazem parte de um
sujeito em toda a redação.
'''
r_list = []
for spacy_doc in nlp.pipe(sentence_list):
big_subj = 0
subj_el_total = 0
for token in spacy_doc:
if token.dep_ == 'nsubj':
size = len([desc for desc in token.subtree if desc.is_alpha])
if size >= 7:
big_subj += 1
subj_el_total += size
r_list.append((big_subj,subj_el_total))
return tuple([sum(i) for i in zip(*r_list)])
def synset_count(sent_list, lang='por', pos='NOUN'):
i = 0
for spacy_doc in nlp.pipe(sent_list):
for token in spacy_doc:
if token.pos_ == pos:
i += len(wn.synsets(token.text, lang=lang))
return (i, i/len(sent_list))
def hypo_hyper_count(sent_list):
hyper = []
hypo = []
size = len(sent_list)
for sent in nlp.pipe(sent_list):
ss = [my_lesk(sent,token.text) for token in sent if token.pos_=='NOUN']
for s in ss:
try:
hyper.append(len(s.hypernyms()))
hypo.append(len(s.hyponyms()))
except AttributeError:
continue
h_er_sum = sum(hyper)
h_o_sum = sum(hypo)
return(h_er_sum,h_er_sum/size, h_o_sum,h_o_sum/size)
|
def corpus_reader(path):
'''Lê as extensões dos arquivos .xml no caminho especificado como path e
retorna uma tupla com duas listas.Uma lista contém os paths para os arquivos
.xml e a outra contém os arquivos Document gerados para aquele arquilo .xml
'''
prog = re.compile('(\.xml)$')
doc_list = []
f = []
fps = []
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
for path in fps:
if re.search(prog,path):
f.append(path)
doc_list.append(Document(path))
return (f, doc_list)
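# --- Illustrative usage sketch (added for clarity, not part of the original source) ---
# Hedged example of calling corpus_reader; the corpus directory is a made-up
# path and Document comes from the module's own ``document`` import.
def _example_corpus_reader():
    paths, documents = corpus_reader("./corpus_xml")  # hypothetical directory
    # paths[i] is the .xml file that produced documents[i]
    return list(zip(paths, documents))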
| 31 | 49 |
# -*- coding: utf-8 -*-
"""
Criado por Lucas Fonseca Lage em 04/03/2020
"""
import re, os, spacy
import numpy as np
from my_wsd import my_lesk
from unicodedata import normalize
from document import Document
from gensim.models import Phrases
# Assumed missing import: ``wn`` is used below in synset_count and hypo_hyper_count;
# NLTK's WordNet interface matches the wn.synsets(..., lang='por') calls.
from nltk.corpus import wordnet as wn
# Carregamento do modelo Spacy
nlp = spacy.load('pt_core_news_lg')
# Carregamento dos modelos de bigramas e trigramas
#bigram_model = Phrases.load('./n_gram_models/bigram_gen_model')
#trigram_model = Phrases.load('./n_gram_models/trigram_gen_model')
freq_pos_tag = [('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('VERB', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET'),
('DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN'),
('NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADJ', 'PUNCT')]
def corpus_reader(path):
'''Lê as extensões dos arquivos .xml no caminho especificado como path e
retorna uma tupla com duas listas.Uma lista contém os paths para os arquivos
.xml e a outra contém os arquivos Document gerados para aquele arquilo .xml
'''
prog = re.compile('(\.xml)$')
doc_list = []
f = []
fps = []
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
for path in fps:
if re.search(prog,path):
f.append(path)
doc_list.append(Document(path))
return (f, doc_list)
def corpus_yeeter(path):
'''Similar ao corpus_reader. Recebe um caminho para a pasta contendo o
corpus e cria um generator. Cada iteração retorna uma tupla contendo um
caminho para o arquivo .xml e o objeto Document criado a partir do mesmo
'''
prog = re.compile('(\.xml)$')
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
if re.search(prog,filename):
path = os.path.normpath(os.path.join(dirpath,filename))
yield (path, Document(path))
def all_fps(path_to_dir):
'''Recebe o caminho para o diretório e retorna uma lista com os caminhos
absolutos para os arquivos que estão nele
'''
fps = []
for dirpath, dirnames, filenames in os.walk(path_to_dir):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
return fps
def remover_acentos(text):
'''Remove os acentos da string "text". Usada somente na função pre_process
'''
return normalize('NFKD', text).encode('ASCII', 'ignore').decode('ASCII')
def pre_process(text):
'''Realiza um pré processamento da string de entrada "text".
Retira espaços em branco extras e retira caracteres não alfanuméricos
'''
text = re.sub('\s{2,}',' ',text).strip().lower()
doc = nlp(text)
#Retira numeros
text = ' '.join([token.text for token in doc if token.is_alpha == True
and token.pos_ != 'PUNCT'])
return remover_acentos(text)
def bi_trigram_counter(sentence_list):
"""Retorna uma tupla com o numero de bigramas e trigramas.
Recebe como entrada o texto segmentado em uma lista de sentencas.
"""
bi_sent_list = []
tri_sent_list = []
for sentence in sentence_list:
proc_sent = pre_process(sentence).lower().split()
bigram_sentence = bigram_model[proc_sent]
bi_sent_list.append(bigram_sentence)
for bi_sent in bi_sent_list:
tri_sent = trigram_model[bi_sent]
tri_sent_list.append(tri_sent)
return(bigram_number(bi_sent_list),trigram_number(tri_sent_list))
def bigram_number(bigram_sent_list):
'''Conta o número de bigramas encontrados na redação. Recebe uma lista de
sentenças que configuram a redação.
'''
count = 0
for sent in bigram_sent_list:
for token in sent:
if re.search(u'_',token):
count += 1
return count
def trigram_number(trigram_sent_list):
'''Conta o número de trigramas encontrados na redação. Recebe uma lista de
sentenças que configuram a redação
'''
count = 0
for sent in trigram_sent_list:
for token in sent:
if re.search('(?<=_).+_',token):
count += 1
return count
def n_most_freq_pos_tag_seq(sent_list):
''' Procura na lista de sentenças a sequências de pos_tag mais frequentes e
retorna a quantidade encontrada.
'''
n = 0
pos_list = []
for i in sent_list:
sent_nlp = nlp(i)
sent_pos = []
for token in sent_nlp:
sent_pos.append(token.pos_)
pos_list.append(sent_pos)
for line in pos_list:
if len(line) < 7:
continue
if len(line) >= 7:
while len(line) >= 7:
t = tuple(line[0:7])
if t in freq_pos_tag:
n+=1
line.pop(0)
return n
def subj_n_elements(sentence_list):
''' Recebe a lista de sentenças da redação. Conta a quantidade de elementos
abaixo do sujeito na árvore sintática gerada pelo "dependecy parser" do
Spacy. Retorna o número de sujeitos que possuem uma quantidade de elementos
maior que 7 e também o número total de elementos que fazem parte de um
sujeito em toda a redação.
'''
r_list = []
for spacy_doc in nlp.pipe(sentence_list):
big_subj = 0
subj_el_total = 0
for token in spacy_doc:
if token.dep_ == 'nsubj':
size = len([desc for desc in token.subtree if desc.is_alpha])
if size >= 7:
big_subj += 1
subj_el_total += size
r_list.append((big_subj,subj_el_total))
return tuple([sum(i) for i in zip(*r_list)])
def synset_count(sent_list, lang='por', pos='NOUN'):
i = 0
for spacy_doc in nlp.pipe(sent_list):
for token in spacy_doc:
if token.pos_ == pos:
i += len(wn.synsets(token.text, lang=lang))
return (i, i/len(sent_list))
def hypo_hyper_count(sent_list):
hyper = []
hypo = []
size = len(sent_list)
for sent in nlp.pipe(sent_list):
ss = [my_lesk(sent,token.text) for token in sent if token.pos_=='NOUN']
for s in ss:
try:
hyper.append(len(s.hypernyms()))
hypo.append(len(s.hyponyms()))
except AttributeError:
continue
h_er_sum = sum(hyper)
h_o_sum = sum(hypo)
return(h_er_sum,h_er_sum/size, h_o_sum,h_o_sum/size)
|
corpus_yeeter
|
Similar ao corpus_reader. Recebe um caminho para a pasta contendo o
corpus e cria um generator. Cada iteração retorna uma tupla contendo um
caminho para o arquivo .xml e o objeto Document criado a partir do mesmo
|
# -*- coding: utf-8 -*-
"""
Criado por Lucas Fonseca Lage em 04/03/2020
"""
import re, os, spacy
import numpy as np
from my_wsd import my_lesk
from unicodedata import normalize
from document import Document
from gensim.models import Phrases
# Assumed missing import: ``wn`` is used below in synset_count and hypo_hyper_count;
# NLTK's WordNet interface matches the wn.synsets(..., lang='por') calls.
from nltk.corpus import wordnet as wn
# Carregamento do modelo Spacy
nlp = spacy.load('pt_core_news_lg')
# Carregamento dos modelos de bigramas e trigramas
#bigram_model = Phrases.load('./n_gram_models/bigram_gen_model')
#trigram_model = Phrases.load('./n_gram_models/trigram_gen_model')
freq_pos_tag = [('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('VERB', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET'),
('DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN'),
('NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADJ', 'PUNCT')]
def corpus_reader(path):
'''Lê as extensões dos arquivos .xml no caminho especificado como path e
retorna uma tupla com duas listas.Uma lista contém os paths para os arquivos
.xml e a outra contém os arquivos Document gerados para aquele arquilo .xml
'''
prog = re.compile('(\.xml)$')
doc_list = []
f = []
fps = []
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
for path in fps:
if re.search(prog,path):
f.append(path)
doc_list.append(Document(path))
return (f, doc_list)
# MASKED: corpus_yeeter function (lines 51-61)
def all_fps(path_to_dir):
'''Recebe o caminho para o diretório e retorna uma lista com os caminhos
absolutos para os arquivos que estão nele
'''
fps = []
for dirpath, dirnames, filenames in os.walk(path_to_dir):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
return fps
def remover_acentos(text):
'''Remove os acentos da string "text". Usada somente na função pre_process
'''
return normalize('NFKD', text).encode('ASCII', 'ignore').decode('ASCII')
def pre_process(text):
'''Realiza um pré processamento da string de entrada "text".
Retira espaços em branco extras e retira caracteres não alfanuméricos
'''
text = re.sub('\s{2,}',' ',text).strip().lower()
doc = nlp(text)
#Retira numeros
text = ' '.join([token.text for token in doc if token.is_alpha == True
and token.pos_ != 'PUNCT'])
return remover_acentos(text)
def bi_trigram_counter(sentence_list):
"""Retorna uma tupla com o numero de bigramas e trigramas.
Recebe como entrada o texto segmentado em uma lista de sentencas.
"""
bi_sent_list = []
tri_sent_list = []
for sentence in sentence_list:
proc_sent = pre_process(sentence).lower().split()
bigram_sentence = bigram_model[proc_sent]
bi_sent_list.append(bigram_sentence)
for bi_sent in bi_sent_list:
tri_sent = trigram_model[bi_sent]
tri_sent_list.append(tri_sent)
return(bigram_number(bi_sent_list),trigram_number(tri_sent_list))
def bigram_number(bigram_sent_list):
'''Conta o número de bigramas encontrados na redação. Recebe uma lista de
sentenças que configuram a redação.
'''
count = 0
for sent in bigram_sent_list:
for token in sent:
if re.search(u'_',token):
count += 1
return count
def trigram_number(trigram_sent_list):
'''Conta o número de trigramas encontrados na redação. Recebe uma lista de
sentenças que configuram a redação
'''
count = 0
for sent in trigram_sent_list:
for token in sent:
if re.search('(?<=_).+_',token):
count += 1
return count
def n_most_freq_pos_tag_seq(sent_list):
''' Procura na lista de sentenças a sequências de pos_tag mais frequentes e
retorna a quantidade encontrada.
'''
n = 0
pos_list = []
for i in sent_list:
sent_nlp = nlp(i)
sent_pos = []
for token in sent_nlp:
sent_pos.append(token.pos_)
pos_list.append(sent_pos)
for line in pos_list:
if len(line) < 7:
continue
if len(line) >= 7:
while len(line) >= 7:
t = tuple(line[0:7])
if t in freq_pos_tag:
n+=1
line.pop(0)
return n
def subj_n_elements(sentence_list):
''' Recebe a lista de sentenças da redação. Conta a quantidade de elementos
abaixo do sujeito na árvore sintática gerada pelo "dependecy parser" do
Spacy. Retorna o número de sujeitos que possuem uma quantidade de elementos
maior que 7 e também o número total de elementos que fazem parte de um
sujeito em toda a redação.
'''
r_list = []
for spacy_doc in nlp.pipe(sentence_list):
big_subj = 0
subj_el_total = 0
for token in spacy_doc:
if token.dep_ == 'nsubj':
size = len([desc for desc in token.subtree if desc.is_alpha])
if size >= 7:
big_subj += 1
subj_el_total += size
r_list.append((big_subj,subj_el_total))
return tuple([sum(i) for i in zip(*r_list)])
def synset_count(sent_list, lang='por', pos='NOUN'):
i = 0
for spacy_doc in nlp.pipe(sent_list):
for token in spacy_doc:
if token.pos_ == pos:
i += len(wn.synsets(token.text, lang=lang))
return (i, i/len(sent_list))
def hypo_hyper_count(sent_list):
hyper = []
hypo = []
size = len(sent_list)
for sent in nlp.pipe(sent_list):
ss = [my_lesk(sent,token.text) for token in sent if token.pos_=='NOUN']
for s in ss:
try:
hyper.append(len(s.hypernyms()))
hypo.append(len(s.hyponyms()))
except AttributeError:
continue
h_er_sum = sum(hyper)
h_o_sum = sum(hypo)
return(h_er_sum,h_er_sum/size, h_o_sum,h_o_sum/size)
|
def corpus_yeeter(path):
'''Similar ao corpus_reader. Recebe um caminho para a pasta contendo o
corpus e cria um generator. Cada iteração retorna uma tupla contendo um
caminho para o arquivo .xml e o objeto Document criado a partir do mesmo
'''
prog = re.compile('(\.xml)$')
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
if re.search(prog,filename):
path = os.path.normpath(os.path.join(dirpath,filename))
yield (path, Document(path))
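# --- Illustrative usage sketch (added for clarity, not part of the original source) ---
# corpus_yeeter streams the corpus lazily, so it avoids building the two full
# lists that corpus_reader returns; the directory name is hypothetical.
def _example_corpus_yeeter():
    n_docs = 0
    for xml_path, document in corpus_yeeter("./corpus_xml"):  # hypothetical directory
        n_docs += 1  # each iteration parses exactly one .xml into a Document
    return n_docs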
| 51 | 61 |
# -*- coding: utf-8 -*-
"""
Criado por Lucas Fonseca Lage em 04/03/2020
"""
import re, os, spacy
import numpy as np
from my_wsd import my_lesk
from unicodedata import normalize
from document import Document
from gensim.models import Phrases
# Assumed missing import: ``wn`` is used below in synset_count and hypo_hyper_count;
# NLTK's WordNet interface matches the wn.synsets(..., lang='por') calls.
from nltk.corpus import wordnet as wn
# Carregamento do modelo Spacy
nlp = spacy.load('pt_core_news_lg')
# Carregamento dos modelos de bigramas e trigramas
#bigram_model = Phrases.load('./n_gram_models/bigram_gen_model')
#trigram_model = Phrases.load('./n_gram_models/trigram_gen_model')
freq_pos_tag = [('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('VERB', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET'),
('DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN'),
('NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADJ', 'PUNCT')]
def corpus_reader(path):
'''Lê as extensões dos arquivos .xml no caminho especificado como path e
retorna uma tupla com duas listas.Uma lista contém os paths para os arquivos
.xml e a outra contém os arquivos Document gerados para aquele arquilo .xml
'''
prog = re.compile('(\.xml)$')
doc_list = []
f = []
fps = []
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
for path in fps:
if re.search(prog,path):
f.append(path)
doc_list.append(Document(path))
return (f, doc_list)
def corpus_yeeter(path):
'''Similar ao corpus_reader. Recebe um caminho para a pasta contendo o
corpus e cria um generator. Cada iteração retorna uma tupla contendo um
caminho para o arquivo .xml e o objeto Document criado a partir do mesmo
'''
prog = re.compile('(\.xml)$')
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
if re.search(prog,filename):
path = os.path.normpath(os.path.join(dirpath,filename))
yield (path, Document(path))
def all_fps(path_to_dir):
'''Recebe o caminho para o diretório e retorna uma lista com os caminhos
absolutos para os arquivos que estão nele
'''
fps = []
for dirpath, dirnames, filenames in os.walk(path_to_dir):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
return fps
def remover_acentos(text):
'''Remove os acentos da string "text". Usada somente na função pre_process
'''
return normalize('NFKD', text).encode('ASCII', 'ignore').decode('ASCII')
def pre_process(text):
'''Realiza um pré processamento da string de entrada "text".
Retira espaços em branco extras e retira caracteres não alfanuméricos
'''
text = re.sub('\s{2,}',' ',text).strip().lower()
doc = nlp(text)
#Retira numeros
text = ' '.join([token.text for token in doc if token.is_alpha == True
and token.pos_ != 'PUNCT'])
return remover_acentos(text)
def bi_trigram_counter(sentence_list):
"""Retorna uma tupla com o numero de bigramas e trigramas.
Recebe como entrada o texto segmentado em uma lista de sentencas.
"""
bi_sent_list = []
tri_sent_list = []
for sentence in sentence_list:
proc_sent = pre_process(sentence).lower().split()
bigram_sentence = bigram_model[proc_sent]
bi_sent_list.append(bigram_sentence)
for bi_sent in bi_sent_list:
tri_sent = trigram_model[bi_sent]
tri_sent_list.append(tri_sent)
return(bigram_number(bi_sent_list),trigram_number(tri_sent_list))
def bigram_number(bigram_sent_list):
'''Conta o número de bigramas encontrados na redação. Recebe uma lista de
sentenças que configuram a redação.
'''
count = 0
for sent in bigram_sent_list:
for token in sent:
if re.search(u'_',token):
count += 1
return count
def trigram_number(trigram_sent_list):
'''Conta o número de trigramas encontrados na redação. Recebe uma lista de
sentenças que configuram a redação
'''
count = 0
for sent in trigram_sent_list:
for token in sent:
if re.search('(?<=_).+_',token):
count += 1
return count
def n_most_freq_pos_tag_seq(sent_list):
''' Procura na lista de sentenças a sequências de pos_tag mais frequentes e
retorna a quantidade encontrada.
'''
n = 0
pos_list = []
for i in sent_list:
sent_nlp = nlp(i)
sent_pos = []
for token in sent_nlp:
sent_pos.append(token.pos_)
pos_list.append(sent_pos)
for line in pos_list:
if len(line) < 7:
continue
if len(line) >= 7:
while len(line) >= 7:
t = tuple(line[0:7])
if t in freq_pos_tag:
n+=1
line.pop(0)
return n
def subj_n_elements(sentence_list):
''' Recebe a lista de sentenças da redação. Conta a quantidade de elementos
abaixo do sujeito na árvore sintática gerada pelo "dependecy parser" do
Spacy. Retorna o número de sujeitos que possuem uma quantidade de elementos
maior que 7 e também o número total de elementos que fazem parte de um
sujeito em toda a redação.
'''
r_list = []
for spacy_doc in nlp.pipe(sentence_list):
big_subj = 0
subj_el_total = 0
for token in spacy_doc:
if token.dep_ == 'nsubj':
size = len([desc for desc in token.subtree if desc.is_alpha])
if size >= 7:
big_subj += 1
subj_el_total += size
r_list.append((big_subj,subj_el_total))
return tuple([sum(i) for i in zip(*r_list)])
def synset_count(sent_list, lang='por', pos='NOUN'):
i = 0
for spacy_doc in nlp.pipe(sent_list):
for token in spacy_doc:
if token.pos_ == pos:
i += len(wn.synsets(token.text, lang=lang))
return (i, i/len(sent_list))
def hypo_hyper_count(sent_list):
hyper = []
hypo = []
size = len(sent_list)
for sent in nlp.pipe(sent_list):
ss = [my_lesk(sent,token.text) for token in sent if token.pos_=='NOUN']
for s in ss:
try:
hyper.append(len(s.hypernyms()))
hypo.append(len(s.hyponyms()))
except AttributeError:
continue
h_er_sum = sum(hyper)
h_o_sum = sum(hypo)
return(h_er_sum,h_er_sum/size, h_o_sum,h_o_sum/size)
|
subj_n_elements
|
Recebe a lista de sentenças da redação. Conta a quantidade de elementos
abaixo do sujeito na árvore sintática gerada pelo "dependecy parser" do
Spacy. Retorna o número de sujeitos que possuem uma quantidade de elementos
maior que 7 e também o número total de elementos que fazem parte de um
sujeito em toda a redação.
|
# -*- coding: utf-8 -*-
"""
Criado por Lucas Fonseca Lage em 04/03/2020
"""
import re, os, spacy
import numpy as np
from my_wsd import my_lesk
from unicodedata import normalize
from document import Document
from gensim.models import Phrases
# Assumed missing import: ``wn`` is used below in synset_count and hypo_hyper_count;
# NLTK's WordNet interface matches the wn.synsets(..., lang='por') calls.
from nltk.corpus import wordnet as wn
# Carregamento do modelo Spacy
nlp = spacy.load('pt_core_news_lg')
# Carregamento dos modelos de bigramas e trigramas
#bigram_model = Phrases.load('./n_gram_models/bigram_gen_model')
#trigram_model = Phrases.load('./n_gram_models/trigram_gen_model')
freq_pos_tag = [('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('VERB', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET'),
('DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN'),
('NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADJ', 'PUNCT')]
def corpus_reader(path):
'''Lê as extensões dos arquivos .xml no caminho especificado como path e
retorna uma tupla com duas listas.Uma lista contém os paths para os arquivos
.xml e a outra contém os arquivos Document gerados para aquele arquilo .xml
'''
prog = re.compile('(\.xml)$')
doc_list = []
f = []
fps = []
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
for path in fps:
if re.search(prog,path):
f.append(path)
doc_list.append(Document(path))
return (f, doc_list)
def corpus_yeeter(path):
'''Similar ao corpus_reader. Recebe um caminho para a pasta contendo o
corpus e cria um generator. Cada iteração retorna uma tupla contendo um
caminho para o arquivo .xml e o objeto Document criado a partir do mesmo
'''
prog = re.compile('(\.xml)$')
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
if re.search(prog,filename):
path = os.path.normpath(os.path.join(dirpath,filename))
yield (path, Document(path))
def all_fps(path_to_dir):
'''Recebe o caminho para o diretório e retorna uma lista com os caminhos
absolutos para os arquivos que estão nele
'''
fps = []
for dirpath, dirnames, filenames in os.walk(path_to_dir):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
return fps
def remover_acentos(text):
'''Remove os acentos da string "text". Usada somente na função pre_process
'''
return normalize('NFKD', text).encode('ASCII', 'ignore').decode('ASCII')
def pre_process(text):
'''Realiza um pré processamento da string de entrada "text".
Retira espaços em branco extras e retira caracteres não alfanuméricos
'''
text = re.sub('\s{2,}',' ',text).strip().lower()
doc = nlp(text)
#Retira numeros
text = ' '.join([token.text for token in doc if token.is_alpha == True
and token.pos_ != 'PUNCT'])
return remover_acentos(text)
def bi_trigram_counter(sentence_list):
"""Retorna uma tupla com o numero de bigramas e trigramas.
Recebe como entrada o texto segmentado em uma lista de sentencas.
"""
bi_sent_list = []
tri_sent_list = []
for sentence in sentence_list:
proc_sent = pre_process(sentence).lower().split()
bigram_sentence = bigram_model[proc_sent]
bi_sent_list.append(bigram_sentence)
for bi_sent in bi_sent_list:
tri_sent = trigram_model[bi_sent]
tri_sent_list.append(tri_sent)
return(bigram_number(bi_sent_list),trigram_number(tri_sent_list))
def bigram_number(bigram_sent_list):
'''Conta o número de bigramas encontrados na redação. Recebe uma lista de
sentenças que configuram a redação.
'''
count = 0
for sent in bigram_sent_list:
for token in sent:
if re.search(u'_',token):
count += 1
return count
def trigram_number(trigram_sent_list):
'''Conta o número de trigramas encontrados na redação. Recebe uma lista de
sentenças que configuram a redação
'''
count = 0
for sent in trigram_sent_list:
for token in sent:
if re.search('(?<=_).+_',token):
count += 1
return count
def n_most_freq_pos_tag_seq(sent_list):
''' Procura na lista de sentenças a sequências de pos_tag mais frequentes e
retorna a quantidade encontrada.
'''
n = 0
pos_list = []
for i in sent_list:
sent_nlp = nlp(i)
sent_pos = []
for token in sent_nlp:
sent_pos.append(token.pos_)
pos_list.append(sent_pos)
for line in pos_list:
if len(line) < 7:
continue
if len(line) >= 7:
while len(line) >= 7:
t = tuple(line[0:7])
if t in freq_pos_tag:
n+=1
line.pop(0)
return n
# MASKED: subj_n_elements function (lines 157-175)
def synset_count(sent_list, lang='por', pos='NOUN'):
i = 0
for spacy_doc in nlp.pipe(sent_list):
for token in spacy_doc:
if token.pos_ == pos:
i += len(wn.synsets(token.text, lang=lang))
return (i, i/len(sent_list))
def hypo_hyper_count(sent_list):
hyper = []
hypo = []
size = len(sent_list)
for sent in nlp.pipe(sent_list):
ss = [my_lesk(sent,token.text) for token in sent if token.pos_=='NOUN']
for s in ss:
try:
hyper.append(len(s.hypernyms()))
hypo.append(len(s.hyponyms()))
except AttributeError:
continue
h_er_sum = sum(hyper)
h_o_sum = sum(hypo)
return(h_er_sum,h_er_sum/size, h_o_sum,h_o_sum/size)
|
def subj_n_elements(sentence_list):
''' Recebe a lista de sentenças da redação. Conta a quantidade de elementos
abaixo do sujeito na árvore sintática gerada pelo "dependecy parser" do
Spacy. Retorna o número de sujeitos que possuem uma quantidade de elementos
maior que 7 e também o número total de elementos que fazem parte de um
sujeito em toda a redação.
'''
r_list = []
for spacy_doc in nlp.pipe(sentence_list):
big_subj = 0
subj_el_total = 0
for token in spacy_doc:
if token.dep_ == 'nsubj':
size = len([desc for desc in token.subtree if desc.is_alpha])
if size >= 7:
big_subj += 1
subj_el_total += size
r_list.append((big_subj,subj_el_total))
return tuple([sum(i) for i in zip(*r_list)])
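# --- Illustrative usage sketch (added for clarity, not part of the original source) ---
# Example call with two invented Portuguese sentences. The function returns a
# tuple: (number of subjects spanning >= 7 alphabetic tokens, total number of
# alphabetic tokens sitting under any subject), aggregated over the whole list.
def _example_subj_n_elements():
    sentences = [
        "O aluno que estudou durante toda a semana passou na prova final.",
        "A professora corrigiu as redações.",
    ]
    big_subjects, total_subject_tokens = subj_n_elements(sentences)
    return big_subjects, total_subject_tokens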
| 157 | 175 |
# -*- coding: utf-8 -*-
"""
Criado por Lucas Fonseca Lage em 04/03/2020
"""
import re, os, spacy
import numpy as np
from my_wsd import my_lesk
from unicodedata import normalize
from document import Document
from gensim.models import Phrases
# Assumed missing import: ``wn`` is used below in synset_count and hypo_hyper_count;
# NLTK's WordNet interface matches the wn.synsets(..., lang='por') calls.
from nltk.corpus import wordnet as wn
# Carregamento do modelo Spacy
nlp = spacy.load('pt_core_news_lg')
# Carregamento dos modelos de bigramas e trigramas
#bigram_model = Phrases.load('./n_gram_models/bigram_gen_model')
#trigram_model = Phrases.load('./n_gram_models/trigram_gen_model')
freq_pos_tag = [('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('VERB', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET'),
('DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN'),
('NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADJ', 'PUNCT')]
def corpus_reader(path):
'''Lê as extensões dos arquivos .xml no caminho especificado como path e
retorna uma tupla com duas listas.Uma lista contém os paths para os arquivos
.xml e a outra contém os arquivos Document gerados para aquele arquilo .xml
'''
prog = re.compile('(\.xml)$')
doc_list = []
f = []
fps = []
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
for path in fps:
if re.search(prog,path):
f.append(path)
doc_list.append(Document(path))
return (f, doc_list)
def corpus_yeeter(path):
'''Similar ao corpus_reader. Recebe um caminho para a pasta contendo o
corpus e cria um generator. Cada iteração retorna uma tupla contendo um
caminho para o arquivo .xml e o objeto Document criado a partir do mesmo
'''
prog = re.compile('(\.xml)$')
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
if re.search(prog,filename):
path = os.path.normpath(os.path.join(dirpath,filename))
yield (path, Document(path))
def all_fps(path_to_dir):
'''Recebe o caminho para o diretório e retorna uma lista com os caminhos
absolutos para os arquivos que estão nele
'''
fps = []
for dirpath, dirnames, filenames in os.walk(path_to_dir):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
return fps
def remover_acentos(text):
'''Remove os acentos da string "text". Usada somente na função pre_process
'''
return normalize('NFKD', text).encode('ASCII', 'ignore').decode('ASCII')
def pre_process(text):
'''Realiza um pré processamento da string de entrada "text".
Retira espaços em branco extras e retira caracteres não alfanuméricos
'''
text = re.sub('\s{2,}',' ',text).strip().lower()
doc = nlp(text)
#Retira numeros
text = ' '.join([token.text for token in doc if token.is_alpha == True
and token.pos_ != 'PUNCT'])
return remover_acentos(text)
def bi_trigram_counter(sentence_list):
"""Retorna uma tupla com o numero de bigramas e trigramas.
Recebe como entrada o texto segmentado em uma lista de sentencas.
"""
bi_sent_list = []
tri_sent_list = []
for sentence in sentence_list:
proc_sent = pre_process(sentence).lower().split()
bigram_sentence = bigram_model[proc_sent]
bi_sent_list.append(bigram_sentence)
for bi_sent in bi_sent_list:
tri_sent = trigram_model[bi_sent]
tri_sent_list.append(tri_sent)
return(bigram_number(bi_sent_list),trigram_number(tri_sent_list))
def bigram_number(bigram_sent_list):
'''Conta o número de bigramas encontrados na redação. Recebe uma lista de
sentenças que configuram a redação.
'''
count = 0
for sent in bigram_sent_list:
for token in sent:
if re.search(u'_',token):
count += 1
return count
def trigram_number(trigram_sent_list):
'''Conta o número de trigramas encontrados na redação. Recebe uma lista de
sentenças que configuram a redação
'''
count = 0
for sent in trigram_sent_list:
for token in sent:
if re.search('(?<=_).+_',token):
count += 1
return count
def n_most_freq_pos_tag_seq(sent_list):
''' Procura na lista de sentenças a sequências de pos_tag mais frequentes e
retorna a quantidade encontrada.
'''
n = 0
pos_list = []
for i in sent_list:
sent_nlp = nlp(i)
sent_pos = []
for token in sent_nlp:
sent_pos.append(token.pos_)
pos_list.append(sent_pos)
for line in pos_list:
if len(line) < 7:
continue
if len(line) >= 7:
while len(line) >= 7:
t = tuple(line[0:7])
if t in freq_pos_tag:
n+=1
line.pop(0)
return n
def subj_n_elements(sentence_list):
''' Recebe a lista de sentenças da redação. Conta a quantidade de elementos
abaixo do sujeito na árvore sintática gerada pelo "dependecy parser" do
Spacy. Retorna o número de sujeitos que possuem uma quantidade de elementos
maior que 7 e também o número total de elementos que fazem parte de um
sujeito em toda a redação.
'''
r_list = []
for spacy_doc in nlp.pipe(sentence_list):
big_subj = 0
subj_el_total = 0
for token in spacy_doc:
if token.dep_ == 'nsubj':
size = len([desc for desc in token.subtree if desc.is_alpha])
if size >= 7:
big_subj += 1
subj_el_total += size
r_list.append((big_subj,subj_el_total))
return tuple([sum(i) for i in zip(*r_list)])
def synset_count(sent_list, lang='por', pos='NOUN'):
i = 0
for spacy_doc in nlp.pipe(sent_list):
for token in spacy_doc:
if token.pos_ == pos:
i += len(wn.synsets(token.text, lang=lang))
return (i, i/len(sent_list))
def hypo_hyper_count(sent_list):
hyper = []
hypo = []
size = len(sent_list)
for sent in nlp.pipe(sent_list):
ss = [my_lesk(sent,token.text) for token in sent if token.pos_=='NOUN']
for s in ss:
try:
hyper.append(len(s.hypernyms()))
hypo.append(len(s.hyponyms()))
except AttributeError:
continue
h_er_sum = sum(hyper)
h_o_sum = sum(hypo)
return(h_er_sum,h_er_sum/size, h_o_sum,h_o_sum/size)
|
__init__
|
Initialize the DnsEntry object.
Closely represent the TransIP dnsEntry object
:param content: content (rdata) corresponding to the record type
(e.g. ip), defaults to None
:type content: str, optional
:param expire: Time To Live (TTL) of the record, defaults to None
:type expire: int, optional
:param name: name of the record, defaults to None
:type name: str, optional
:param rtype: one of the (allowed) record types (see DNS_RECORD_TYPES),
defaults to None
:type rtype: str, optional
|
# MIT License, Copyright (c) 2020 Bob van den Heuvel
# https://github.com/bheuvel/transip/blob/main/LICENSE
"""Interface with the TransIP API, specifically DNS record management."""
import logging
from enum import Enum
from pathlib import Path
from time import sleep
from typing import Dict, Union
import requests
from transip_dns import __project__, __version__
from transip_dns.accesstoken import AccessToken
logger = logging.getLogger(__name__)
DNS_RECORD_TYPES = ["A", "AAAA", "CNAME", "MX", "NS", "TXT", "SRV", "SSHFP", "TLSA"]
class DnsEntry(object):
"""Class matching the TransIP dnsEntry."""
# MASKED: __init__ function (lines 23-49)
def __repr__(self) -> str:
"""Represent the TransIP definition of a dnsEntry object.
The dnsEntry object is specified as a JSON object
:return: JSON representation of the record according to the dnsEntry
:rtype: str
"""
return {
"dnsEntry": {
"name": self.name,
"expire": self.expire,
"type": self.rtype,
"content": self.content,
}
}
class RecordState(Enum):
"""Enumeration of record states.
When searching for records, these are the possible states.
NOTFOUND: The record is not present
FOUND_SAME: Record is present and the content is (already) the same
FOUND_DIFFERENT: Record is present, but with different content
FOUND_NO_REQUEST_DATA: If the content of the (requested) dns_record is empty.
This may occur when deleting a record (just) by name.
:param Enum: Parent class to create an enumeration
:type Enum: Enum
"""
FOUND_SAME = 1
FOUND_DIFFERENT = 2
FOUND_NO_REQUEST_DATA = 4
NOTFOUND = 3
class DnsRecord(DnsEntry):
"""DNS Record encapsulation with ip query and data checking.
Initializes the object, potentially search for the IP address and
check if the record type is allowed.
:param DnsEntry: Parent class to enhance
:type DnsEntry: DnsEntry
"""
def __init__(
self,
name: str,
rtype: str,
expire: str,
content: str,
zone: str,
query_data: Union[str, None] = None,
) -> None:
"""Initialize the DnsRecord object with safety checks.
:param name: name of the DNS record
:type name: str
:param rtype: type of the DNS record
:type rtype: str
:param expire: TTL of the DNS record
:type expire: str
:param content: content of the DNS record
:type content: str
:param zone: Zone or domain of the DNS record
:type zone: str
:param query_data: url which produces the exact data to be used as
content, defaults to None
:type query_data: Union[str, None], optional
:raises ValueError: Raise an error if an invalid record type is specified
"""
if rtype is not None:
if not rtype.upper() in DNS_RECORD_TYPES:
raise ValueError(
f"Type '{rtype}' is not one of the "
f"allowed record types ({DNS_RECORD_TYPES})"
)
super().__init__(content=content, expire=expire, name=name, rtype=rtype)
self.zone = zone
self.fqdn = f"{self.name}.{self.zone}"
if query_data:
self.content = DnsRecord.query_for_content(query_data)
logger.info(f"Resolved record data to be used: '{self.content}'")
self.record_state = None
@property
def dnsentry(self):
"""Return the TransIP representation of the dnsEntry object."""
return super().__repr__()
@staticmethod
def query_for_content(query_url: str) -> str:
"""Retrieve the ip address from the "current" location.
By default it will query for an ip (v4/v6) address,
but may be used for other data as well
:param query_url: url which produces the exact data
to be used as content
:type query_url: str
:raises RequestsRaisedException: raised for connection errors with the server
:raises Non200Response: raised when server does not respond "OK" (200)
:return: the resolved ip address, or whatever may be
returned by a custom provided url
:rtype: str
"""
my_ip = None
try:
ip_query = requests.get(query_url)
except Exception as e:
raise RequestsRaisedException(
"Error in request for Internet ip address; "
) from e
if ip_query.status_code == 200:
my_ip = ip_query.text.strip()
else:
raise Non200Response(
(
"Could not resolve Internet ip address (non 200 response); "
f"{ip_query.status_code}: {ip_query.reason}"
)
)
return my_ip
class KeyFileLoadException(Exception):
"""Provided private_key is is not a valid path, nor a valid key format."""
pass
class RequestsRaisedException(Exception):
"""Error occurred in requesting an url for the Internet ip address."""
pass
class Non200Response(Exception):
"""Request for the Internet ip address resulted in a non 200 response."""
pass
class TransipInterface:
"""Encapsulation of connection with TransIP."""
def __init__(
self,
login: str = None,
private_key_pem: str = None,
private_key_pem_file: Path = None,
access_token: str = None,
expiration_time: int = 60,
read_only: bool = False,
global_key: bool = False,
label: str = f"{__project__} {__version__}",
authentication_url: str = "https://api.transip.nl/v6/auth",
root_endpoint: str = "https://api.transip.nl/v6",
connection_timeout: int = 30,
retry: int = 3,
retry_delay: float = 5,
):
"""Initialize the interface with TransIP.
:param login: the TransIP login name, defaults to None
:type login: str, optional
:param private_key_pem: the private key as string, defaults to None
:type private_key_pem: str, optional
:param private_key_pem_file: file location of the private key, defaults to None
:type private_key_pem_file: Path, optional
:param access_token: JSON Web Token, defaults to None
:type access_token: str, optional
:param expiration_time: expiration time (TTL) of the access token,
defaults to 60
:type expiration_time: int, optional
:param read_only: key/token allows to change objects or only read,
defaults to False
:type read_only: bool, optional
:param global_key: key may only be used from whitelisted ip addresses,
defaults to False
:type global_key: bool, optional
:param label: textual identifier for the access token,
defaults to "__project__ __version__"
:type label: str, optional
:param authentication_url: TransIP authentication url,
defaults to "https://api.transip.nl/v6/auth"
:type authentication_url: str, optional
:param root_endpoint: TransIP root of endpoints,
defaults to "https://api.transip.nl/v6"
:type root_endpoint: str, optional
:param connection_timeout: timeout for the network response, defaults to 30
:type connection_timeout: int, optional
:param retry: retry when the call fails due to zone
being saved or locked (409), defaults to 3
:type retry: int, optional
:param retry_delay: time in seconds to wait between retries,
defaults to 5
:type retry_delay: float, optional
"""
if login is not None and access_token is not None:
raise ValueError(
"Either login and private_key or access token must be used, not both."
)
self.attempts = retry + 1
self.retry_delay = retry_delay
self.root_endpoint = root_endpoint
self.connection_timeout = connection_timeout
if access_token is None:
self._token = AccessToken(
login=login,
private_key=private_key_pem,
private_key_file=private_key_pem_file,
expiration_time=expiration_time,
read_only=read_only,
global_key=global_key,
label=label,
authentication_url=authentication_url,
connection_timeout=connection_timeout,
)
else:
self._token = access_token
@property
def headers(self) -> Dict:
"""Generate the default headers.
        Note that the reference to "self._token" will always
provide a valid (and renewed if needed) token
:return: default headers, including the authentication token
:rtype: Dict
"""
return {
"Content-Type": "application/json",
"Authorization": f"Bearer {self._token}",
"User-Agent": f"{__project__} {__version__}",
}
def execute_dns_entry(self, method: str, rest_path: str, dnsentry: dict):
"""Execute the requested action, with retry on 409.
409: ~ "DNS Entries are currently being saved"
409: ~ "is locked"
:param method: get, post, patch, delete
:type method: str
        :param rest_path: path of the REST endpoint, appended to the root endpoint
        :type rest_path: str
:param dnsentry: DNS entry to manage
:type dnsentry: dict
:raises requests.exceptions.HTTPError: Raise an error
if a 400 or 500 response is returned
:return: the requests response
:rtype: requests.models.Response
"""
endpoint = f"{self.root_endpoint}{rest_path}"
request = getattr(requests, method)
response = None
for attempt in range(1, self.attempts + 1):
response = request(
url=endpoint,
json=dnsentry,
headers=self.headers,
timeout=self.connection_timeout,
)
if response.status_code != 409:
response.raise_for_status()
logger.debug(f"API request returned {response.status_code}")
return response
logger.debug(
(
f"API request returned {response.status_code}: "
f"{response.text}, atttempt {attempt} of {self.attempts}"
)
)
sleep(self.retry_delay)
# raises requests.exceptions.HTTPError
response.raise_for_status()
def domains(self) -> list:
"""Get a listing of all available domains.
:return: List of available domains
:rtype: list
"""
return self.execute_dns_entry("get", "/domains", None)
def get_dns_entry(self, dns_zone_name: str) -> Dict:
"""Get a listing of the respective domain."""
response = self.execute_dns_entry(
"get", rest_path=f"/domains/{dns_zone_name}/dns", dnsentry=None
)
return response
def post_dns_entry(self, dns_record: DnsRecord):
"""Add a dnsEntry to the respective domain."""
return self.execute_dns_entry(
"post",
rest_path=f"/domains/{dns_record.zone}/dns",
dnsentry=dns_record.dnsentry,
)
def patch_dns_entry(self, dns_record: DnsRecord):
"""Adjust a record in the respective domain."""
return self.execute_dns_entry(
"patch",
rest_path=f"/domains/{dns_record.zone}/dns",
dnsentry=dns_record.dnsentry,
)
def delete_dns_entry(self, dns_record: DnsRecord):
"""Delete an entry in the respective domain."""
return self.execute_dns_entry(
"delete",
rest_path=f"/domains/{dns_record.zone}/dns",
dnsentry=dns_record.dnsentry,
)
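# --- Illustrative usage sketch (added for clarity, not part of the original source) ---
# Hedged example of wiring DnsRecord and TransipInterface together. The login,
# key file and zone are placeholders, and a real TransIP account plus network
# access are required for the call to succeed.
def _example_update_record():
    api = TransipInterface(
        login="exampleuser",  # placeholder account name
        private_key_pem_file=Path("transip.key"),  # placeholder key file
    )
    record = DnsRecord(name="www", rtype="A", expire=300,
                       content="203.0.113.10", zone="example.com")
    return api.patch_dns_entry(record)  # PATCH /domains/example.com/dns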
|
def __init__(
self,
content: str = None,
expire: int = None,
name: str = None,
rtype: str = None,
):
"""Initialize the DnsEntry object.
Closely represent the TransIP dnsEntry object
:param content: content (rdata) corresponding to the record type
(e.g. ip), defaults to None
:type content: str, optional
:param expire: Time To Live (TTL) of the record, defaults to None
:type expire: int, optional
:param name: name of the record, defaults to None
:type name: str, optional
:param rtype: one of the (allowed) record types (see DNS_RECORD_TYPES),
defaults to None
:type rtype: str, optional
"""
self.content = content
self.expire = expire
self.name = name
self.rtype = None
self.rtype = rtype
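# --- Illustrative usage sketch (added for clarity, not part of the original source) ---
# Minimal example of constructing a DnsEntry; the host name and address are
# invented. Note that __repr__ returns a dict (not a string), which is what
# DnsRecord.dnsentry forwards to the API.
def _example_dns_entry():
    entry = DnsEntry(content="203.0.113.10", expire=300, name="www", rtype="A")
    payload = entry.__repr__()
    # {'dnsEntry': {'name': 'www', 'expire': 300, 'type': 'A', 'content': '203.0.113.10'}}
    return payload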
| 23 | 49 |
# MIT License, Copyright (c) 2020 Bob van den Heuvel
# https://github.com/bheuvel/transip/blob/main/LICENSE
"""Interface with the TransIP API, specifically DNS record management."""
import logging
from enum import Enum
from pathlib import Path
from time import sleep
from typing import Dict, Union
import requests
from transip_dns import __project__, __version__
from transip_dns.accesstoken import AccessToken
logger = logging.getLogger(__name__)
DNS_RECORD_TYPES = ["A", "AAAA", "CNAME", "MX", "NS", "TXT", "SRV", "SSHFP", "TLSA"]
class DnsEntry(object):
"""Class matching the TransIP dnsEntry."""
def __init__(
self,
content: str = None,
expire: int = None,
name: str = None,
rtype: str = None,
):
"""Initialize the DnsEntry object.
Closely represent the TransIP dnsEntry object
:param content: content (rdata) corresponding to the record type
(e.g. ip), defaults to None
:type content: str, optional
:param expire: Time To Live (TTL) of the record, defaults to None
:type expire: int, optional
:param name: name of the record, defaults to None
:type name: str, optional
:param rtype: one of the (allowed) record types (see DNS_RECORD_TYPES),
defaults to None
:type rtype: str, optional
"""
self.content = content
self.expire = expire
self.name = name
self.rtype = None
self.rtype = rtype
def __repr__(self) -> str:
"""Represent the TransIP definition of a dnsEntry object.
The dnsEntry object is specified as a JSON object
:return: JSON representation of the record according to the dnsEntry
:rtype: str
"""
return {
"dnsEntry": {
"name": self.name,
"expire": self.expire,
"type": self.rtype,
"content": self.content,
}
}
class RecordState(Enum):
"""Enumeration of record states.
When searching for records, these are the possible states.
NOTFOUND: The record is not present
FOUND_SAME: Record is present and the content is (already) the same
FOUND_DIFFERENT: Record is present, but with different content
FOUND_NO_REQUEST_DATA: If the content of the (requested) dns_record is empty.
This may occur when deleting a record (just) by name.
:param Enum: Parent class to create an enumeration
:type Enum: Enum
"""
FOUND_SAME = 1
FOUND_DIFFERENT = 2
FOUND_NO_REQUEST_DATA = 4
NOTFOUND = 3
class DnsRecord(DnsEntry):
"""DNS Record encapsulation with ip query and data checking.
Initializes the object, potentially search for the IP address and
check if the record type is allowed.
:param DnsEntry: Parent class to enhance
:type DnsEntry: DnsEntry
"""
def __init__(
self,
name: str,
rtype: str,
expire: str,
content: str,
zone: str,
query_data: Union[str, None] = None,
) -> None:
"""Initialize the DnsRecord object with safety checks.
:param name: name of the DNS record
:type name: str
:param rtype: type of the DNS record
:type rtype: str
:param expire: TTL of the DNS record
:type expire: str
:param content: content of the DNS record
:type content: str
:param zone: Zone or domain of the DNS record
:type zone: str
:param query_data: url which produces the exact data to be used as
content, defaults to None
:type query_data: Union[str, None], optional
:raises ValueError: Raise an error if an invalid record type is specified
"""
if rtype is not None:
if not rtype.upper() in DNS_RECORD_TYPES:
raise ValueError(
f"Type '{rtype}' is not one of the "
f"allowed record types ({DNS_RECORD_TYPES})"
)
super().__init__(content=content, expire=expire, name=name, rtype=rtype)
self.zone = zone
self.fqdn = f"{self.name}.{self.zone}"
if query_data:
self.content = DnsRecord.query_for_content(query_data)
logger.info(f"Resolved record data to be used: '{self.content}'")
self.record_state = None
@property
def dnsentry(self):
"""Return the TransIP representation of the dnsEntry object."""
return super().__repr__()
@staticmethod
def query_for_content(query_url: str) -> str:
"""Retrieve the ip address from the "current" location.
By default it will query for an ip (v4/v6) address,
but may be used for other data as well
:param query_url: url which produces the exact data
to be used as content
:type query_url: str
:raises RequestsRaisedException: raised for connection errors with the server
:raises Non200Response: raised when server does not respond "OK" (200)
:return: the resolved ip address, or whatever may be
returned by a custom provided url
:rtype: str
"""
my_ip = None
try:
ip_query = requests.get(query_url)
except Exception as e:
raise RequestsRaisedException(
"Error in request for Internet ip address; "
) from e
if ip_query.status_code == 200:
my_ip = ip_query.text.strip()
else:
raise Non200Response(
(
"Could not resolve Internet ip address (non 200 response); "
f"{ip_query.status_code}: {ip_query.reason}"
)
)
return my_ip
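    # Illustrative sketch (not part of the original module): query_for_content
    # simply GETs the given URL and returns the stripped response body, so a
    # "what is my ip" style service can be passed as query_data. The URL below
    # is an example assumption:
    #
    #     record = DnsRecord(name="www", rtype="A", expire=300, content=None,
    #                        zone="example.com",
    #                        query_data="https://api.ipify.org")
    #     # record.content now holds the resolved public IP address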
class KeyFileLoadException(Exception):
"""Provided private_key is is not a valid path, nor a valid key format."""
pass
class RequestsRaisedException(Exception):
"""Error occurred in requesting an url for the Internet ip address."""
pass
class Non200Response(Exception):
"""Request for the Internet ip address resulted in a non 200 response."""
pass
class TransipInterface:
"""Encapsulation of connection with TransIP."""
def __init__(
self,
login: str = None,
private_key_pem: str = None,
private_key_pem_file: Path = None,
access_token: str = None,
expiration_time: int = 60,
read_only: bool = False,
global_key: bool = False,
label: str = f"{__project__} {__version__}",
authentication_url: str = "https://api.transip.nl/v6/auth",
root_endpoint: str = "https://api.transip.nl/v6",
connection_timeout: int = 30,
retry: int = 3,
retry_delay: float = 5,
):
"""Initialize the interface with TransIP.
:param login: the TransIP login name, defaults to None
:type login: str, optional
:param private_key_pem: the private key as string, defaults to None
:type private_key_pem: str, optional
:param private_key_pem_file: file location of the private key, defaults to None
:type private_key_pem_file: Path, optional
:param access_token: JSON Web Token, defaults to None
:type access_token: str, optional
:param expiration_time: expiration time (TTL) of the access token,
defaults to 60
:type expiration_time: int, optional
:param read_only: key/token allows to change objects or only read,
defaults to False
:type read_only: bool, optional
:param global_key: key may only be used from whitelisted ip addresses,
defaults to False
:type global_key: bool, optional
:param label: textual identifier for the access token,
defaults to "__project__ __version__"
:type label: str, optional
:param authentication_url: TransIP authentication url,
defaults to "https://api.transip.nl/v6/auth"
:type authentication_url: str, optional
:param root_endpoint: TransIP root of endpoints,
defaults to "https://api.transip.nl/v6"
:type root_endpoint: str, optional
:param connection_timeout: timeout for the network response, defaults to 30
:type connection_timeout: int, optional
:param retry: retry when the call fails due to zone
being saved or locked (409), defaults to 3
:type retry: int, optional
:param retry_delay: time in seconds to wait between retries,
defaults to 5
:type retry_delay: float, optional
"""
if login is not None and access_token is not None:
raise ValueError(
"Either login and private_key or access token must be used, not both."
)
self.attempts = retry + 1
self.retry_delay = retry_delay
self.root_endpoint = root_endpoint
self.connection_timeout = connection_timeout
if access_token is None:
self._token = AccessToken(
login=login,
private_key=private_key_pem,
private_key_file=private_key_pem_file,
expiration_time=expiration_time,
read_only=read_only,
global_key=global_key,
label=label,
authentication_url=authentication_url,
connection_timeout=connection_timeout,
)
else:
self._token = access_token
@property
def headers(self) -> Dict:
"""Generate the default headers.
        Note that the reference to "self._token" will always
        provide a valid (and renewed, if needed) token
:return: default headers, including the authentication token
:rtype: Dict
"""
return {
"Content-Type": "application/json",
"Authorization": f"Bearer {self._token}",
"User-Agent": f"{__project__} {__version__}",
}
def execute_dns_entry(self, method: str, rest_path: str, dnsentry: dict):
"""Execute the requested action, with retry on 409.
409: ~ "DNS Entries are currently being saved"
409: ~ "is locked"
:param method: get, post, patch, delete
:type method: str
        :param rest_path: REST path for the respective DNS zone
        :type rest_path: str
:param dnsentry: DNS entry to manage
:type dnsentry: dict
:raises requests.exceptions.HTTPError: Raise an error
if a 400 or 500 response is returned
:return: the requests response
:rtype: requests.models.Response
"""
endpoint = f"{self.root_endpoint}{rest_path}"
request = getattr(requests, method)
response = None
for attempt in range(1, self.attempts + 1):
response = request(
url=endpoint,
json=dnsentry,
headers=self.headers,
timeout=self.connection_timeout,
)
if response.status_code != 409:
response.raise_for_status()
logger.debug(f"API request returned {response.status_code}")
return response
logger.debug(
(
f"API request returned {response.status_code}: "
f"{response.text}, atttempt {attempt} of {self.attempts}"
)
)
sleep(self.retry_delay)
# raises requests.exceptions.HTTPError
response.raise_for_status()
def domains(self) -> list:
"""Get a listing of all available domains.
:return: List of available domains
:rtype: list
"""
return self.execute_dns_entry("get", "/domains", None)
def get_dns_entry(self, dns_zone_name: str) -> Dict:
"""Get a listing of the respective domain."""
response = self.execute_dns_entry(
"get", rest_path=f"/domains/{dns_zone_name}/dns", dnsentry=None
)
return response
def post_dns_entry(self, dns_record: DnsRecord):
"""Add a dnsEntry to the respective domain."""
return self.execute_dns_entry(
"post",
rest_path=f"/domains/{dns_record.zone}/dns",
dnsentry=dns_record.dnsentry,
)
def patch_dns_entry(self, dns_record: DnsRecord):
"""Adjust a record in the respective domain."""
return self.execute_dns_entry(
"patch",
rest_path=f"/domains/{dns_record.zone}/dns",
dnsentry=dns_record.dnsentry,
)
def delete_dns_entry(self, dns_record: DnsRecord):
"""Delete an entry in the respective domain."""
return self.execute_dns_entry(
"delete",
rest_path=f"/domains/{dns_record.zone}/dns",
dnsentry=dns_record.dnsentry,
)
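A minimal end-to-end sketch of the classes above; it is illustrative only, and the login, key path and record values are assumptions rather than values from the original project:

# Hypothetical usage: create a record and add it via the API (retries on 409).
record = DnsRecord(name="www", rtype="A", expire=300,
                   content="192.0.2.10", zone="example.com")
api = TransipInterface(login="demo-user",
                       private_key_pem_file=Path("/path/to/key.pem"))
api.post_dns_entry(record)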
|
extract_valid_cpu_usage_data
|
This method is to extract the valid cpu usage data according to the poll_interval
1. Find the index of the maximum value for every poll interval,
2. Discard the data if the index is on the edge (0 or len(program_to_check_cpu_usage) - 1)
3. If the index is adjacent to the index kept from the previous interval, only keep the former one
4. Return all remaining indexes
For example:
poll_interval = 10
7, 1, 0, 1, 0, 1, 5, 1, 1, 2, 0, 1, 0, 1, 0, 6, 1, 1, 1, 2
return [15]
0, 1, 0, 1, 0, 1, 0, 1, 0, 8, 7, 1, 0, 1, 0, 6, 1, 1, 1, 2
return [9]
|
import logging
import pytest
from collections import namedtuple, Counter
from tests.platform_tests.counterpoll.cpu_memory_helper import restore_counter_poll # lgtm [py/unused-import]
from tests.platform_tests.counterpoll.cpu_memory_helper import counterpoll_type # lgtm [py/unused-import]
from tests.platform_tests.counterpoll.counterpoll_helper import ConterpollHelper
from tests.platform_tests.counterpoll.counterpoll_constants import CounterpollConstants
from tests.common.mellanox_data import is_mellanox_device
pytestmark = [
pytest.mark.topology('any'),
pytest.mark.device_type('physical'),
]
def is_asan_image(duthosts, enum_rand_one_per_hwsku_hostname):
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
asan_val_from_sonic_ver_cmd = "sonic-cfggen -y /etc/sonic/sonic_version.yml -v asan"
asan_val = duthost.command(asan_val_from_sonic_ver_cmd)['stdout']
is_asan = False
if asan_val == "yes":
logging.info("The current sonic image is a ASAN image")
is_asan = True
return is_asan
@pytest.fixture(scope='module')
def setup_thresholds(duthosts, enum_rand_one_per_hwsku_hostname):
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
cpu_threshold = 50
memory_threshold = 60
high_cpu_consume_procs = {}
is_asan = is_asan_image(duthosts, enum_rand_one_per_hwsku_hostname)
if duthost.facts['platform'] in ('x86_64-arista_7050_qx32', 'x86_64-kvm_x86_64-r0') or is_asan:
memory_threshold = 90
if duthost.facts['platform'] in ('x86_64-arista_7260cx3_64'):
high_cpu_consume_procs['syncd'] = 80
    # The CPU usage of `sx_sdk` on Mellanox is expected to be higher, and the actual CPU usage
    # is correlated with the number of ports, so a higher CPU threshold is used for sx_sdk
if duthost.facts["asic_type"] == 'mellanox':
high_cpu_consume_procs['sx_sdk'] = 90
return memory_threshold, cpu_threshold, high_cpu_consume_procs
def test_cpu_memory_usage(duthosts, enum_rand_one_per_hwsku_hostname, setup_thresholds):
"""Check DUT memory usage and process cpu usage are within threshold."""
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
MonitResult = namedtuple('MonitResult', ['processes', 'memory'])
monit_results = duthost.monit_process(iterations=24)['monit_results']
memory_threshold, normal_cpu_threshold, high_cpu_consume_procs = setup_thresholds
persist_threshold = 8
outstanding_mem_polls = {}
outstanding_procs = {}
outstanding_procs_counter = Counter()
for i, monit_result in enumerate(MonitResult(*_) for _ in monit_results):
logging.debug("------ Iteration %d ------", i)
check_memory(i, memory_threshold, monit_result, outstanding_mem_polls)
for proc in monit_result.processes:
cpu_threshold = normal_cpu_threshold
if proc['name'] in high_cpu_consume_procs:
cpu_threshold = high_cpu_consume_procs[proc['name']]
check_cpu_usage(cpu_threshold, outstanding_procs, outstanding_procs_counter, proc)
analyse_monitoring_results(cpu_threshold, memory_threshold, outstanding_mem_polls, outstanding_procs,
outstanding_procs_counter, persist_threshold)
def analyse_monitoring_results(cpu_threshold, memory_threshold, outstanding_mem_polls, outstanding_procs,
outstanding_procs_counter, persist_threshold):
persist_outstanding_procs = []
for pid, freq in outstanding_procs_counter.most_common():
if freq <= persist_threshold:
break
persist_outstanding_procs.append(pid)
if outstanding_mem_polls or persist_outstanding_procs:
if outstanding_mem_polls:
logging.error("system memory usage exceeds %d%%", memory_threshold)
if persist_outstanding_procs:
logging.error(
"processes that persistently exceeds cpu usage %d%%: %s",
cpu_threshold,
[outstanding_procs[p] for p in persist_outstanding_procs]
)
pytest.fail("system cpu and memory usage check fails")
@pytest.fixture(scope='module')
def counterpoll_cpu_threshold(duthosts, request):
counterpoll_cpu_usage_threshold = {"port-buffer-drop": request.config.getoption("--port_buffer_drop_cpu_usage_threshold")}
return counterpoll_cpu_usage_threshold
def test_cpu_memory_usage_counterpoll(duthosts, enum_rand_one_per_hwsku_hostname,
setup_thresholds, restore_counter_poll, counterpoll_type, counterpoll_cpu_threshold):
"""Check DUT memory usage and process cpu usage are within threshold.
Disable all counterpoll types except tested one
    Collect memory and CPU usage for 60 secs
    Compare the memory usage with the memory threshold
    Compare the average cpu usage with the cpu threshold for the specified process
    Restore counterpoll status
"""
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
program_to_check = get_manufacturer_program_to_check(duthost)
if program_to_check is None:
pytest.skip("Skip no program is offered to check")
memory_threshold, _, _ = setup_thresholds
counterpoll_cpu_usage_threshold = counterpoll_cpu_threshold[counterpoll_type]
MonitResult = namedtuple('MonitResult', ['processes', 'memory'])
disable_all_counterpoll_type_except_tested(duthost, counterpoll_type)
monit_results = duthost.monit_process(iterations=60, delay_interval=1)['monit_results']
poll_interval = CounterpollConstants.COUNTERPOLL_INTERVAL[counterpoll_type] // 1000
outstanding_mem_polls = {}
outstanding_procs = {}
outstanding_procs_counter = Counter()
cpu_usage_program_to_check = []
prepare_ram_cpu_usage_results(MonitResult, counterpoll_cpu_usage_threshold, memory_threshold, monit_results, outstanding_mem_polls,
outstanding_procs, outstanding_procs_counter, program_to_check,
cpu_usage_program_to_check)
log_cpu_usage_by_vendor(cpu_usage_program_to_check, counterpoll_type)
cpu_usage_average = caculate_cpu_usge_average_value(extract_valid_cpu_usage_data(cpu_usage_program_to_check, poll_interval), cpu_usage_program_to_check)
logging.info("Average cpu_usage is {}".format(cpu_usage_average))
assert cpu_usage_average < counterpoll_cpu_usage_threshold, "cpu_usage_average of {} exceeds the cpu threshold:{}".format(program_to_check, counterpoll_cpu_usage_threshold)
assert not outstanding_mem_polls, " Memory {} exceeds the memory threshold {} ".format(outstanding_mem_polls, memory_threshold)
def log_cpu_usage_by_vendor(cpu_usage_program_to_check, counterpoll_type):
if cpu_usage_program_to_check:
logging.info('CPU usage for counterpoll type {} : {}'.format(counterpoll_type, cpu_usage_program_to_check))
def get_manufacturer_program_to_check(duthost):
if is_mellanox_device(duthost):
return CounterpollConstants.SX_SDK
def prepare_ram_cpu_usage_results(MonitResult, cpu_threshold, memory_threshold, monit_results, outstanding_mem_polls,
outstanding_procs, outstanding_procs_counter, program_to_check,
program_to_check_cpu_usage):
for i, monit_result in enumerate(MonitResult(*_) for _ in monit_results):
logging.debug("------ Iteration %d ------", i)
check_memory(i, memory_threshold, monit_result, outstanding_mem_polls)
for proc in monit_result.processes:
update_cpu_usage_desired_program(proc, program_to_check, program_to_check_cpu_usage)
# MASKED: extract_valid_cpu_usage_data function (lines 156-192)
def caculate_cpu_usge_average_value(valid_cpu_usage_center_index_list, program_to_check_cpu_usage):
len_valid_cpu_usage = len(valid_cpu_usage_center_index_list)
cpu_usage_average = 0.0
for i in valid_cpu_usage_center_index_list:
cpu_usage_average += sum(program_to_check_cpu_usage[i - 1: i + 2])
logging.info("cpu usage center index:{}: cpu usage:{}".format(i, program_to_check_cpu_usage[i - 1:i + 2]))
return cpu_usage_average / len_valid_cpu_usage / 3.0 if len_valid_cpu_usage != 0 else 0
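# Worked example (illustrative, not from the original test): with the data
# [7, 1, 0, 1, 0, 1, 5, 1, 1, 2, 0, 1, 0, 1, 0, 6, 1, 1, 1, 2] and the single
# center index 15, the function averages the three samples around the center:
# (0 + 6 + 1) / 3 ~= 2.33.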
def check_cpu_usage(cpu_threshold, outstanding_procs, outstanding_procs_counter, proc):
if proc['cpu_percent'] >= cpu_threshold:
logging.debug("process %s(%d) cpu usage exceeds %d%%.",
proc['name'], proc['pid'], cpu_threshold)
outstanding_procs[proc['pid']] = proc.get('cmdline', proc['name'])
outstanding_procs_counter[proc['pid']] += 1
def update_cpu_usage_desired_program(proc, program_to_check, program_to_check_cpu_usage):
if program_to_check:
if proc['name'] == program_to_check:
program_to_check_cpu_usage.append(proc['cpu_percent'])
def check_memory(i, memory_threshold, monit_result, outstanding_mem_polls):
if monit_result.memory['used_percent'] > memory_threshold:
logging.debug("system memory usage exceeds %d%%: %s",
memory_threshold, monit_result.memory)
outstanding_mem_polls[i] = monit_result.memory
def disable_all_counterpoll_type_except_tested(duthost, counterpoll_type):
available_types = ConterpollHelper.get_available_counterpoll_types(duthost)
available_types.remove(counterpoll_type)
ConterpollHelper.disable_counterpoll(duthost, available_types)
|
def extract_valid_cpu_usage_data(program_to_check_cpu_usage, poll_interval):
"""
    This method is to extract the valid cpu usage data according to the poll_interval
    1. Find the index of the maximum value for every poll interval,
    2. Discard the data if the index is on the edge (0 or len(program_to_check_cpu_usage) - 1)
    3. If the index is adjacent to the index kept from the previous interval, only keep the former one
    4. Return all remaining indexes
    For example:
    poll_interval = 10
    7, 1, 0, 1, 0, 1, 5, 1, 1, 2, 0, 1, 0, 1, 0, 6, 1, 1, 1, 2
    return [15]
    0, 1, 0, 1, 0, 1, 0, 1, 0, 8, 7, 1, 0, 1, 0, 6, 1, 1, 1, 2
    return [9]
"""
valid_cpu_usage_center_index_list = []
poll_number = len(program_to_check_cpu_usage) // poll_interval
def find_max_cpu_usage(cpu_usage_list, poll_times):
max_cpu_usage = cpu_usage_list[0]
max_cpu_usage_index = 0
for i, cpu_usage in enumerate(cpu_usage_list):
if cpu_usage > max_cpu_usage:
max_cpu_usage = cpu_usage
max_cpu_usage_index = i
return [max_cpu_usage, max_cpu_usage_index + poll_times * poll_interval]
for i in range(0, poll_number):
max_cpu_usage, max_cpu_usage_index = find_max_cpu_usage(
program_to_check_cpu_usage[poll_interval * i:poll_interval * (i + 1)], i)
if max_cpu_usage_index == 0 or max_cpu_usage_index == len(program_to_check_cpu_usage) - 1:
logging.info("The data is on the edge:{}, discard it ".format(max_cpu_usage_index))
else:
if valid_cpu_usage_center_index_list and valid_cpu_usage_center_index_list[-1] + 1 == max_cpu_usage_index:
continue
valid_cpu_usage_center_index_list.append(max_cpu_usage_index)
return valid_cpu_usage_center_index_list
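The docstring examples above can be turned into a quick sanity check; the sketch below is illustrative and assumes extract_valid_cpu_usage_data is importable from this test module:

# Hypothetical sanity check for extract_valid_cpu_usage_data.
data_1 = [7, 1, 0, 1, 0, 1, 5, 1, 1, 2, 0, 1, 0, 1, 0, 6, 1, 1, 1, 2]
assert extract_valid_cpu_usage_data(data_1, 10) == [15]
data_2 = [0, 1, 0, 1, 0, 1, 0, 1, 0, 8, 7, 1, 0, 1, 0, 6, 1, 1, 1, 2]
assert extract_valid_cpu_usage_data(data_2, 10) == [9]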
| 156 | 192 |
import logging
import pytest
from collections import namedtuple, Counter
from tests.platform_tests.counterpoll.cpu_memory_helper import restore_counter_poll # lgtm [py/unused-import]
from tests.platform_tests.counterpoll.cpu_memory_helper import counterpoll_type # lgtm [py/unused-import]
from tests.platform_tests.counterpoll.counterpoll_helper import ConterpollHelper
from tests.platform_tests.counterpoll.counterpoll_constants import CounterpollConstants
from tests.common.mellanox_data import is_mellanox_device
pytestmark = [
pytest.mark.topology('any'),
pytest.mark.device_type('physical'),
]
def is_asan_image(duthosts, enum_rand_one_per_hwsku_hostname):
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
asan_val_from_sonic_ver_cmd = "sonic-cfggen -y /etc/sonic/sonic_version.yml -v asan"
asan_val = duthost.command(asan_val_from_sonic_ver_cmd)['stdout']
is_asan = False
if asan_val == "yes":
logging.info("The current sonic image is a ASAN image")
is_asan = True
return is_asan
@pytest.fixture(scope='module')
def setup_thresholds(duthosts, enum_rand_one_per_hwsku_hostname):
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
cpu_threshold = 50
memory_threshold = 60
high_cpu_consume_procs = {}
is_asan = is_asan_image(duthosts, enum_rand_one_per_hwsku_hostname)
if duthost.facts['platform'] in ('x86_64-arista_7050_qx32', 'x86_64-kvm_x86_64-r0') or is_asan:
memory_threshold = 90
if duthost.facts['platform'] in ('x86_64-arista_7260cx3_64'):
high_cpu_consume_procs['syncd'] = 80
    # The CPU usage of `sx_sdk` on Mellanox is expected to be higher, and the actual CPU usage
    # is correlated with the number of ports, so a higher CPU threshold is used for sx_sdk
if duthost.facts["asic_type"] == 'mellanox':
high_cpu_consume_procs['sx_sdk'] = 90
return memory_threshold, cpu_threshold, high_cpu_consume_procs
def test_cpu_memory_usage(duthosts, enum_rand_one_per_hwsku_hostname, setup_thresholds):
"""Check DUT memory usage and process cpu usage are within threshold."""
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
MonitResult = namedtuple('MonitResult', ['processes', 'memory'])
monit_results = duthost.monit_process(iterations=24)['monit_results']
memory_threshold, normal_cpu_threshold, high_cpu_consume_procs = setup_thresholds
persist_threshold = 8
outstanding_mem_polls = {}
outstanding_procs = {}
outstanding_procs_counter = Counter()
for i, monit_result in enumerate(MonitResult(*_) for _ in monit_results):
logging.debug("------ Iteration %d ------", i)
check_memory(i, memory_threshold, monit_result, outstanding_mem_polls)
for proc in monit_result.processes:
cpu_threshold = normal_cpu_threshold
if proc['name'] in high_cpu_consume_procs:
cpu_threshold = high_cpu_consume_procs[proc['name']]
check_cpu_usage(cpu_threshold, outstanding_procs, outstanding_procs_counter, proc)
analyse_monitoring_results(cpu_threshold, memory_threshold, outstanding_mem_polls, outstanding_procs,
outstanding_procs_counter, persist_threshold)
def analyse_monitoring_results(cpu_threshold, memory_threshold, outstanding_mem_polls, outstanding_procs,
outstanding_procs_counter, persist_threshold):
persist_outstanding_procs = []
for pid, freq in outstanding_procs_counter.most_common():
if freq <= persist_threshold:
break
persist_outstanding_procs.append(pid)
if outstanding_mem_polls or persist_outstanding_procs:
if outstanding_mem_polls:
logging.error("system memory usage exceeds %d%%", memory_threshold)
if persist_outstanding_procs:
logging.error(
"processes that persistently exceeds cpu usage %d%%: %s",
cpu_threshold,
[outstanding_procs[p] for p in persist_outstanding_procs]
)
pytest.fail("system cpu and memory usage check fails")
@pytest.fixture(scope='module')
def counterpoll_cpu_threshold(duthosts, request):
counterpoll_cpu_usage_threshold = {"port-buffer-drop": request.config.getoption("--port_buffer_drop_cpu_usage_threshold")}
return counterpoll_cpu_usage_threshold
def test_cpu_memory_usage_counterpoll(duthosts, enum_rand_one_per_hwsku_hostname,
setup_thresholds, restore_counter_poll, counterpoll_type, counterpoll_cpu_threshold):
"""Check DUT memory usage and process cpu usage are within threshold.
Disable all counterpoll types except tested one
    Collect memory and CPU usage for 60 secs
    Compare the memory usage with the memory threshold
    Compare the average cpu usage with the cpu threshold for the specified process
    Restore counterpoll status
"""
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
program_to_check = get_manufacturer_program_to_check(duthost)
if program_to_check is None:
pytest.skip("Skip no program is offered to check")
memory_threshold, _, _ = setup_thresholds
counterpoll_cpu_usage_threshold = counterpoll_cpu_threshold[counterpoll_type]
MonitResult = namedtuple('MonitResult', ['processes', 'memory'])
disable_all_counterpoll_type_except_tested(duthost, counterpoll_type)
monit_results = duthost.monit_process(iterations=60, delay_interval=1)['monit_results']
poll_interval = CounterpollConstants.COUNTERPOLL_INTERVAL[counterpoll_type] // 1000
outstanding_mem_polls = {}
outstanding_procs = {}
outstanding_procs_counter = Counter()
cpu_usage_program_to_check = []
prepare_ram_cpu_usage_results(MonitResult, counterpoll_cpu_usage_threshold, memory_threshold, monit_results, outstanding_mem_polls,
outstanding_procs, outstanding_procs_counter, program_to_check,
cpu_usage_program_to_check)
log_cpu_usage_by_vendor(cpu_usage_program_to_check, counterpoll_type)
cpu_usage_average = caculate_cpu_usge_average_value(extract_valid_cpu_usage_data(cpu_usage_program_to_check, poll_interval), cpu_usage_program_to_check)
logging.info("Average cpu_usage is {}".format(cpu_usage_average))
assert cpu_usage_average < counterpoll_cpu_usage_threshold, "cpu_usage_average of {} exceeds the cpu threshold:{}".format(program_to_check, counterpoll_cpu_usage_threshold)
assert not outstanding_mem_polls, " Memory {} exceeds the memory threshold {} ".format(outstanding_mem_polls, memory_threshold)
def log_cpu_usage_by_vendor(cpu_usage_program_to_check, counterpoll_type):
if cpu_usage_program_to_check:
logging.info('CPU usage for counterpoll type {} : {}'.format(counterpoll_type, cpu_usage_program_to_check))
def get_manufacturer_program_to_check(duthost):
if is_mellanox_device(duthost):
return CounterpollConstants.SX_SDK
def prepare_ram_cpu_usage_results(MonitResult, cpu_threshold, memory_threshold, monit_results, outstanding_mem_polls,
outstanding_procs, outstanding_procs_counter, program_to_check,
program_to_check_cpu_usage):
for i, monit_result in enumerate(MonitResult(*_) for _ in monit_results):
logging.debug("------ Iteration %d ------", i)
check_memory(i, memory_threshold, monit_result, outstanding_mem_polls)
for proc in monit_result.processes:
update_cpu_usage_desired_program(proc, program_to_check, program_to_check_cpu_usage)
def extract_valid_cpu_usage_data(program_to_check_cpu_usage, poll_interval):
"""
    This method is to extract the valid cpu usage data according to the poll_interval
    1. Find the index of the maximum value for every poll interval,
    2. Discard the data if the index is on the edge (0 or len(program_to_check_cpu_usage) - 1)
    3. If the index is adjacent to the index kept from the previous interval, only keep the former one
    4. Return all remaining indexes
    For example:
    poll_interval = 10
    7, 1, 0, 1, 0, 1, 5, 1, 1, 2, 0, 1, 0, 1, 0, 6, 1, 1, 1, 2
    return [15]
    0, 1, 0, 1, 0, 1, 0, 1, 0, 8, 7, 1, 0, 1, 0, 6, 1, 1, 1, 2
    return [9]
"""
valid_cpu_usage_center_index_list = []
poll_number = len(program_to_check_cpu_usage) // poll_interval
def find_max_cpu_usage(cpu_usage_list, poll_times):
max_cpu_usage = cpu_usage_list[0]
max_cpu_usage_index = 0
for i, cpu_usage in enumerate(cpu_usage_list):
if cpu_usage > max_cpu_usage:
max_cpu_usage = cpu_usage
max_cpu_usage_index = i
return [max_cpu_usage, max_cpu_usage_index + poll_times * poll_interval]
for i in range(0, poll_number):
max_cpu_usage, max_cpu_usage_index = find_max_cpu_usage(
program_to_check_cpu_usage[poll_interval * i:poll_interval * (i + 1)], i)
if max_cpu_usage_index == 0 or max_cpu_usage_index == len(program_to_check_cpu_usage) - 1:
logging.info("The data is on the edge:{}, discard it ".format(max_cpu_usage_index))
else:
if valid_cpu_usage_center_index_list and valid_cpu_usage_center_index_list[-1] + 1 == max_cpu_usage_index:
continue
valid_cpu_usage_center_index_list.append(max_cpu_usage_index)
return valid_cpu_usage_center_index_list
def caculate_cpu_usge_average_value(valid_cpu_usage_center_index_list, program_to_check_cpu_usage):
len_valid_cpu_usage = len(valid_cpu_usage_center_index_list)
cpu_usage_average = 0.0
for i in valid_cpu_usage_center_index_list:
cpu_usage_average += sum(program_to_check_cpu_usage[i - 1: i + 2])
logging.info("cpu usage center index:{}: cpu usage:{}".format(i, program_to_check_cpu_usage[i - 1:i + 2]))
return cpu_usage_average / len_valid_cpu_usage / 3.0 if len_valid_cpu_usage != 0 else 0
def check_cpu_usage(cpu_threshold, outstanding_procs, outstanding_procs_counter, proc):
if proc['cpu_percent'] >= cpu_threshold:
logging.debug("process %s(%d) cpu usage exceeds %d%%.",
proc['name'], proc['pid'], cpu_threshold)
outstanding_procs[proc['pid']] = proc.get('cmdline', proc['name'])
outstanding_procs_counter[proc['pid']] += 1
def update_cpu_usage_desired_program(proc, program_to_check, program_to_check_cpu_usage):
if program_to_check:
if proc['name'] == program_to_check:
program_to_check_cpu_usage.append(proc['cpu_percent'])
def check_memory(i, memory_threshold, monit_result, outstanding_mem_polls):
if monit_result.memory['used_percent'] > memory_threshold:
logging.debug("system memory usage exceeds %d%%: %s",
memory_threshold, monit_result.memory)
outstanding_mem_polls[i] = monit_result.memory
def disable_all_counterpoll_type_except_tested(duthost, counterpoll_type):
available_types = ConterpollHelper.get_available_counterpoll_types(duthost)
available_types.remove(counterpoll_type)
ConterpollHelper.disable_counterpoll(duthost, available_types)
|
reset_module
|
reset all local vars
Args:
None
Returns:
None
|
# blender imports
import bpy
# utility imports
import numpy as np
import csv
import random
import importlib
from src.TSSBase import TSSBase
class TSSMeshHandle(TSSBase):
"""docstring for TSSMeshHandle"""
def __init__(self):
super(TSSMeshHandle, self).__init__()
# class vars ###################################################################################################
self._mesh_list = [] # list of mesh [list]
self._mesh_obj_list = [] # list of mesh nodes [list]
############################################################################################ end of class vars #
# MASKED: reset_module function (lines 22-41)
def activate_pass(self,pass_name, pass_cfg, keyframe=-1):
""" enables specific pass
Args:
pass_name: name of pass to activate [string]
pass_cfg: specific parameters for the pass [dict]
keyframe: current frame number; if value > -1, this should enable also the setting of a keyframe [int]
Returns:
None
"""
for mesh in self._mesh_obj_list:
mesh.activate_pass(pass_name=pass_name,pass_cfg=pass_cfg,keyframe=keyframe)
def create(self,stage_dict):
""" create function
Args:
stage_dict: dict of stages [dict]
Returns:
None
"""
self._create_meshes(cfg=self._cfg["MESHES"],
general_cfg=self._cfg["GENERAL"],
stage_dict=stage_dict)
def _create_meshes(self,cfg,general_cfg,stage_dict):
""" create function
Args:
cfg: list of mesh cfgs [list]
general_cfg: general cfg [dict]
stage_dict: dict of stages [dict]
Returns:
success code [boolean]
"""
_current_instance_label_count = 0
for ii, mesh in enumerate(cfg):
try:
# import module and create class #######################################################################
_module_name = "src.assets.meshes." + mesh["type"]
_module = importlib.import_module(_module_name)
_class = getattr(_module, mesh["type"])
_mesh = _class()
################################################################ end of import module and create class #
# set pass params and create pass ######################################################################
# set general cfg
_mesh.set_general_cfg(cfg=general_cfg)
_mesh.set_stage_dict(stage_dict=stage_dict)
# save name of material
mesh['meshParams']['name'] = mesh["name"]
# update mesh cfg
_mesh.update_cfg(cfg=mesh["meshParams"])
# create material
_instance_count, _instance_label_count = _mesh.create(instance_id_offset=_current_instance_label_count)
_current_instance_label_count += _instance_label_count
############################################################### end of set pass params and create pass #
# add pass to list
self._mesh_obj_list.append(_mesh)
self._mesh_list.append(_mesh.get_meshes())
except ImportError:
# manage import error
raise Exception("Cannot add mesh")
return -1
return 0
def get_meshes(self):
""" get all meshes
Args:
None
Returns:
list of meshes [list]
"""
return self._mesh_list
def get_mesh_objs(self):
""" get all mesh objects
Args:
None
Returns:
list of mesh objects [list]
"""
return self._mesh_obj_list
|
def reset_module(self):
""" reset all local vars
Args:
None
Returns:
None
"""
# reset all mesh ############################################################################################
for mesh in self._mesh_obj_list:
# reset mesh
mesh.reset_module()
# maybe obsolete in future versions
del mesh
##################################################################################### end of reset all mesh #
self.reset_base()
self._mesh_list = []
self._mesh_obj_list = []
| 22 | 41 |
# blender imports
import bpy
# utility imports
import numpy as np
import csv
import random
import importlib
from src.TSSBase import TSSBase
class TSSMeshHandle(TSSBase):
"""docstring for TSSMeshHandle"""
def __init__(self):
super(TSSMeshHandle, self).__init__()
# class vars ###################################################################################################
self._mesh_list = [] # list of mesh [list]
self._mesh_obj_list = [] # list of mesh nodes [list]
############################################################################################ end of class vars #
def reset_module(self):
""" reset all local vars
Args:
None
Returns:
None
"""
# reset all mesh ############################################################################################
for mesh in self._mesh_obj_list:
# reset mesh
mesh.reset_module()
# maybe obsolete in future versions
del mesh
##################################################################################### end of reset all mesh #
self.reset_base()
self._mesh_list = []
self._mesh_obj_list = []
def activate_pass(self,pass_name, pass_cfg, keyframe=-1):
""" enables specific pass
Args:
pass_name: name of pass to activate [string]
pass_cfg: specific parameters for the pass [dict]
keyframe: current frame number; if value > -1, this should enable also the setting of a keyframe [int]
Returns:
None
"""
for mesh in self._mesh_obj_list:
mesh.activate_pass(pass_name=pass_name,pass_cfg=pass_cfg,keyframe=keyframe)
def create(self,stage_dict):
""" create function
Args:
stage_dict: dict of stages [dict]
Returns:
None
"""
self._create_meshes(cfg=self._cfg["MESHES"],
general_cfg=self._cfg["GENERAL"],
stage_dict=stage_dict)
def _create_meshes(self,cfg,general_cfg,stage_dict):
""" create function
Args:
cfg: list of mesh cfgs [list]
general_cfg: general cfg [dict]
stage_dict: dict of stages [dict]
Returns:
success code [boolean]
"""
_current_instance_label_count = 0
for ii, mesh in enumerate(cfg):
try:
# import module and create class #######################################################################
_module_name = "src.assets.meshes." + mesh["type"]
_module = importlib.import_module(_module_name)
_class = getattr(_module, mesh["type"])
_mesh = _class()
################################################################ end of import module and create class #
# set pass params and create pass ######################################################################
# set general cfg
_mesh.set_general_cfg(cfg=general_cfg)
_mesh.set_stage_dict(stage_dict=stage_dict)
# save name of material
mesh['meshParams']['name'] = mesh["name"]
# update mesh cfg
_mesh.update_cfg(cfg=mesh["meshParams"])
# create material
_instance_count, _instance_label_count = _mesh.create(instance_id_offset=_current_instance_label_count)
_current_instance_label_count += _instance_label_count
############################################################### end of set pass params and create pass #
# add pass to list
self._mesh_obj_list.append(_mesh)
self._mesh_list.append(_mesh.get_meshes())
except ImportError:
# manage import error
raise Exception("Cannot add mesh")
return -1
return 0
def get_meshes(self):
""" get all meshes
Args:
None
Returns:
list of meshes [list]
"""
return self._mesh_list
def get_mesh_objs(self):
""" get all mesh objects
Args:
None
Returns:
list of mesh objects [list]
"""
return self._mesh_obj_list
|
_create_meshes
|
create function
Args:
cfg: list of mesh cfgs [list]
general_cfg: general cfg [dict]
stage_dict: dict of stages [dict]
Returns:
success code [boolean]
|
# blender imports
import bpy
# utility imports
import numpy as np
import csv
import random
import importlib
from src.TSSBase import TSSBase
class TSSMeshHandle(TSSBase):
"""docstring for TSSMeshHandle"""
def __init__(self):
super(TSSMeshHandle, self).__init__()
# class vars ###################################################################################################
self._mesh_list = [] # list of mesh [list]
self._mesh_obj_list = [] # list of mesh nodes [list]
############################################################################################ end of class vars #
def reset_module(self):
""" reset all local vars
Args:
None
Returns:
None
"""
# reset all mesh ############################################################################################
for mesh in self._mesh_obj_list:
# reset mesh
mesh.reset_module()
# maybe obsolete in future versions
del mesh
##################################################################################### end of reset all mesh #
self.reset_base()
self._mesh_list = []
self._mesh_obj_list = []
def activate_pass(self,pass_name, pass_cfg, keyframe=-1):
""" enables specific pass
Args:
pass_name: name of pass to activate [string]
pass_cfg: specific parameters for the pass [dict]
keyframe: current frame number; if value > -1, this should enable also the setting of a keyframe [int]
Returns:
None
"""
for mesh in self._mesh_obj_list:
mesh.activate_pass(pass_name=pass_name,pass_cfg=pass_cfg,keyframe=keyframe)
def create(self,stage_dict):
""" create function
Args:
stage_dict: dict of stages [dict]
Returns:
None
"""
self._create_meshes(cfg=self._cfg["MESHES"],
general_cfg=self._cfg["GENERAL"],
stage_dict=stage_dict)
# MASKED: _create_meshes function (lines 71-122)
def get_meshes(self):
""" get all meshes
Args:
None
Returns:
list of meshes [list]
"""
return self._mesh_list
def get_mesh_objs(self):
""" get all mesh objects
Args:
None
Returns:
list of mesh objects [list]
"""
return self._mesh_obj_list
|
def _create_meshes(self,cfg,general_cfg,stage_dict):
""" create function
Args:
cfg: list of mesh cfgs [list]
general_cfg: general cfg [dict]
stage_dict: dict of stages [dict]
Returns:
success code [boolean]
"""
_current_instance_label_count = 0
for ii, mesh in enumerate(cfg):
try:
# import module and create class #######################################################################
_module_name = "src.assets.meshes." + mesh["type"]
_module = importlib.import_module(_module_name)
_class = getattr(_module, mesh["type"])
_mesh = _class()
################################################################ end of import module and create class #
# set pass params and create pass ######################################################################
# set general cfg
_mesh.set_general_cfg(cfg=general_cfg)
_mesh.set_stage_dict(stage_dict=stage_dict)
# save name of material
mesh['meshParams']['name'] = mesh["name"]
# update mesh cfg
_mesh.update_cfg(cfg=mesh["meshParams"])
# create material
_instance_count, _instance_label_count = _mesh.create(instance_id_offset=_current_instance_label_count)
_current_instance_label_count += _instance_label_count
############################################################### end of set pass params and create pass #
# add pass to list
self._mesh_obj_list.append(_mesh)
self._mesh_list.append(_mesh.get_meshes())
except ImportError:
# manage import error
raise Exception("Cannot add mesh")
return -1
return 0
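For reference, each entry in cfg is expected to provide the keys used above; a minimal, hypothetical entry might look like this (the type and parameter names are assumptions, not taken from the original project):

# Hypothetical shape of a single mesh cfg entry consumed by _create_meshes.
cfg = [{
    "type": "MeshExample",    # class loaded from src.assets.meshes.MeshExample
    "name": "example_mesh",   # copied into meshParams["name"] before update_cfg()
    "meshParams": {},         # forwarded to the mesh instance via update_cfg()
}]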
| 71 | 122 |
# blender imports
import bpy
# utility imports
import numpy as np
import csv
import random
import importlib
from src.TSSBase import TSSBase
class TSSMeshHandle(TSSBase):
"""docstring for TSSMeshHandle"""
def __init__(self):
super(TSSMeshHandle, self).__init__()
# class vars ###################################################################################################
self._mesh_list = [] # list of mesh [list]
self._mesh_obj_list = [] # list of mesh nodes [list]
############################################################################################ end of class vars #
def reset_module(self):
""" reset all local vars
Args:
None
Returns:
None
"""
# reset all mesh ############################################################################################
for mesh in self._mesh_obj_list:
# reset mesh
mesh.reset_module()
# maybe obsolete in future versions
del mesh
##################################################################################### end of reset all mesh #
self.reset_base()
self._mesh_list = []
self._mesh_obj_list = []
def activate_pass(self,pass_name, pass_cfg, keyframe=-1):
""" enables specific pass
Args:
pass_name: name of pass to activate [string]
pass_cfg: specific parameters for the pass [dict]
keyframe: current frame number; if value > -1, this should enable also the setting of a keyframe [int]
Returns:
None
"""
for mesh in self._mesh_obj_list:
mesh.activate_pass(pass_name=pass_name,pass_cfg=pass_cfg,keyframe=keyframe)
def create(self,stage_dict):
""" create function
Args:
stage_dict: dict of stages [dict]
Returns:
None
"""
self._create_meshes(cfg=self._cfg["MESHES"],
general_cfg=self._cfg["GENERAL"],
stage_dict=stage_dict)
def _create_meshes(self,cfg,general_cfg,stage_dict):
""" create function
Args:
cfg: list of mesh cfgs [list]
general_cfg: general cfg [dict]
stage_dict: dict of stages [dict]
Returns:
success code [boolean]
"""
_current_instance_label_count = 0
for ii, mesh in enumerate(cfg):
try:
# import module and create class #######################################################################
_module_name = "src.assets.meshes." + mesh["type"]
_module = importlib.import_module(_module_name)
_class = getattr(_module, mesh["type"])
_mesh = _class()
################################################################ end of import module and create class #
# set pass params and create pass ######################################################################
# set general cfg
_mesh.set_general_cfg(cfg=general_cfg)
_mesh.set_stage_dict(stage_dict=stage_dict)
# save name of material
mesh['meshParams']['name'] = mesh["name"]
# update mesh cfg
_mesh.update_cfg(cfg=mesh["meshParams"])
# create material
_instance_count, _instance_label_count = _mesh.create(instance_id_offset=_current_instance_label_count)
_current_instance_label_count += _instance_label_count
############################################################### end of set pass params and create pass #
# add pass to list
self._mesh_obj_list.append(_mesh)
self._mesh_list.append(_mesh.get_meshes())
except ImportError:
# manage import error
raise Exception("Cannot add mesh")
return -1
return 0
def get_meshes(self):
""" get all meshes
Args:
None
Returns:
list of meshes [list]
"""
return self._mesh_list
def get_mesh_objs(self):
""" get all mesh objects
Args:
None
Returns:
list of mesh objects [list]
"""
return self._mesh_obj_list
|
_set_voldb_empty_at_startup_indicator
|
Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder import compute
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver',
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver',
'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver':
'cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver',
'cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver':
'cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver',
'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver':
'cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver',
'cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver':
'cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver',
}
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
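# Illustrative usage of the decorator above (hypothetical example, not taken
# from this file): a volume operation guarded this way serializes on the named
# lock "<volume_id>-<function_name>":
#
#     @locked_volume_operation
#     def delete_volume(self, context, volume_id, **kwargs):
#         ...
#
# A concurrent operation on the same volume_id then blocks until the lock is
# released.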
def locked_detach_operation(f):
"""Lock decorator for volume detach operations.
Takes a named lock prior to executing the detach call. The lock is named
with the operation executed and the id of the volume. This lock can then
be used by other operations to avoid operation conflicts on shared volumes.
This locking mechanism is only for detach calls. We can't use the
locked_volume_operation, because detach requires an additional
attachment_id in the parameter list.
"""
def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def ldo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
return ldo_inner1
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
"""
def lso_inner1(inst, context, snapshot, **kwargs):
@utils.synchronized("%s-%s" % (snapshot.id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot, **kwargs)
return lso_inner1
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '2.0'
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment'}
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
# if its not using the multi backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
# We pass the current setting for service.active_backend_id to
        # the driver on init, in case there was a restart or something
curr_active_backend_id = None
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
# NOTE(jdg): This is to solve problems with unit tests
LOG.info(_LI("Service not found for updating "
"active_backend_id, assuming default "
"for driver init."))
else:
curr_active_backend_id = service.active_backend_id
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning(_LW("Suppressing requests library SSL Warnings"))
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id)
self.message_api = message_api.API()
if CONF.profiler.enabled and profiler is not None:
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
if self.driver.configuration.safe_get(
'image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get(
'image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get(
'image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(
self.db,
cinder_volume.API(),
max_cache_size,
max_cache_entries
)
LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
{'host': self.host})
else:
LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
{'host': self.host})
self.image_volume_cache = None
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pool is introduced, ask
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception(_LE('Fetch volume pool name failed.'),
resource=volume)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
# Otherwise, put them into a special fixed pool with
# volume_backend_name being the pool name, if
# volume_backend_name is None, use default pool name.
# This is only for counting purpose, doesn't update DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
# MASKED: _set_voldb_empty_at_startup_indicator function (lines 362-377)
def _sync_provider_info(self, ctxt, volumes, snapshots):
        # NOTE(jdg): For now this just updates provider_id, we can add
        # more items to the update if they're relevant, but we need
        # to be safe in what we allow and add a list of allowed keys
# things that make sense are provider_*, replication_status etc
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
                # NOTE(JDG): Make sure returned item is in this host's volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']][0])
if update:
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
        # NOTE(jdg): snapshots are slightly harder, because
# we do not have a host column and of course no get
# all by host, so we use a get_all and bounce our
# response off of it
if snapshot_updates:
cinder_snaps = self.db.snapshot_get_all(ctxt)
for snap in cinder_snaps:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
def init_host(self):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
# Initialize backend capabilities list
self.driver.init_capabilities()
volumes = objects.VolumeList.get_all_by_host(ctxt, self.host)
snapshots = self.db.snapshot_get_by_host(ctxt, self.host)
self._sync_provider_info(ctxt, volumes, snapshots)
# FIXME volume count for exporting is wrong
try:
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
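            # Triage existing volumes by status: count in-use/available
            # volumes toward allocated capacity (and re-export in-use ones),
            # flag volumes stuck in downloading/creating as error, and
            # resolve uploading volumes from their attachment state.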
for volume in volumes:
# available volume should also be counted into allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception:
LOG.exception(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
volume.status = 'error'
volume.save()
elif volume['status'] in ('downloading', 'creating'):
LOG.warning(_LW("Detected volume stuck "
"in %(curr_status)s "
"status, setting to ERROR."),
{'curr_status': volume['status']},
resource=volume)
if volume['status'] == 'downloading':
self.driver.clear_download(ctxt, volume)
volume.status = 'error'
volume.save()
elif volume.status == 'uploading':
# Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment(
ctxt, volume.id)
else:
pass
snapshots = objects.SnapshotList.get_by_host(
ctxt, self.host, {'status': fields.SnapshotStatus.CREATING})
for snapshot in snapshots:
LOG.warning(_LW("Detected snapshot stuck in creating "
"status, setting to ERROR."), resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
resource=volume)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
# NOTE(jdg): Careful though because that doesn't mean
# that an entry exists in the service table
self.driver.set_initialized()
for volume in volumes:
if volume['status'] == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
volume['id'], volume=volume)
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume['id'], volume=volume)
LOG.info(_LI("Resume volume delete completed successfully."),
resource=volume)
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
LOG.info(_LI("Driver initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def init_host_with_rpc(self):
LOG.info(_LI("Initializing RPC dependent components of volume "
"driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
stats = self.driver.get_volume_stats(refresh=True)
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Service not found for updating "
"replication_status."))
if service.replication_status != (
fields.ReplicationStatus.FAILED_OVER):
if stats and stats.get('replication_enabled', False):
service.replication_status = fields.ReplicationStatus.ENABLED
else:
service.replication_status = fields.ReplicationStatus.DISABLED
service.save()
LOG.info(_LI("Driver post RPC initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def is_working(self):
"""Return if Manager is ready to accept requests.
This is to inform Service class that in case of volume driver
initialization failure the manager is actually down and not ready to
accept any requests.
"""
return self.driver.initialized
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
volume=None):
"""Creates the volume."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
if request_spec is None:
request_spec = {}
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume.id,
allow_reschedule,
context,
request_spec,
filter_properties,
image_volume_cache=self.image_volume_cache,
)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
source_replicaid = request_spec.get('source_replicaid')
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
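        # The lock name mirrors the per-resource lock taken by the decorated
        # delete operations (e.g. "<snapshot_id>-delete_snapshot"), so
        # creating from a source and deleting that source cannot run
        # concurrently.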
def _run_flow():
            # This code executes the create volume flow. If something goes
            # wrong, the flow reverts all jobs that were done and re-raises
            # an exception. Otherwise, all data that was generated by the
            # flow becomes available in the flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
@utils.synchronized(locked_action, external=True)
def _run_flow_locked():
_run_flow()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
vol_ref = None
try:
if locked_action is None:
_run_flow()
else:
_run_flow_locked()
finally:
try:
vol_ref = flow_engine.storage.fetch('volume_ref')
except tfe.NotFound:
                # If there's no vol_ref, then the flow was reverted. Let's
                # check whether rescheduling occurred.
try:
rescheduled = flow_engine.storage.get_revert_result(
create_volume.OnFailureRescheduleTask.make_name(
[create_volume.ACTION]))
except tfe.NotFound:
pass
if not rescheduled:
if not vol_ref:
# Flow was reverted and not rescheduled, fetching
# volume_ref from the DB, because it will be needed.
vol_ref = objects.Volume.get_by_id(context, volume.id)
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(vol_ref)
LOG.info(_LI("Created volume successfully."), resource=vol_ref)
return vol_ref.id
@locked_volume_operation
def delete_volume(self, context, volume_id,
unmanage_only=False,
volume=None,
cascade=False):
"""Deletes and unexports volume.
1. Delete a volume(normal case)
Delete a volume and update quotas.
2. Delete a migration volume
If deleting the volume in a migration, we want to skip
quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
volume = objects.Volume.get_by_id(context, volume_id)
else:
volume.refresh()
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.debug("Attempted delete of non-existent volume: %s", volume_id)
return
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if vol_utils.extract_host(volume.host) != self.host:
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
if unmanage_only and cascade:
# This could be done, but is ruled out for now just
# for simplicity.
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
is_migrating = volume.migration_status not in (None, 'error',
'success')
is_migrating_dest = (is_migrating and
volume.migration_status.startswith(
'target:'))
self._notify_about_volume_usage(context, volume, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != 'deleting':
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
LOG.error(_LE("Unable to delete busy volume."),
resource=volume)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'available')
return
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume."),
resource=volume)
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
volume.destroy()
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
self._notify_about_volume_usage(context, volume, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
size = volume.size
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
LOG.info(_LI("Deleted volume successfully."), resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
def create_snapshot(self, context, volume_id, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save()
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, volume_id)
except exception.GlanceMetadataNotFound:
                # If the volume was not created from an image, no glance
                # metadata would be available for that volume in the
                # volume glance metadata table.
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating snapshot"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id}, resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info(_LI("Create snapshot completed successfully"),
resource=snapshot)
return snapshot.id
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot, unmanage_only=False):
"""Deletes and unexports snapshot."""
context = context.elevated()
snapshot._context = context
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
if unmanage_only:
self.driver.unmanage_snapshot(snapshot)
else:
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
resource=snapshot)
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
return
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Update snapshot usages failed."),
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy()
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
LOG.info(_LI("Delete snapshot completed successfully"),
resource=snapshot)
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached."""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
raise exception.InvalidVolume(
reason=_("volume is already attached"))
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachments = \
self.db.volume_attachment_get_all_by_instance_uuid(
context, volume_id, instance_uuid)
else:
attachments = (
self.db.volume_attachment_get_all_by_host(
context,
volume_id,
host_name_sanitized))
if attachments:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
return
self._notify_about_volume_usage(context, volume,
"attach.start")
values = {'volume_id': volume_id,
'attach_status': 'attaching', }
attachment = self.db.volume_attach(context.elevated(), values)
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(), volume_id,
{"attached_mode": mode}, False)
attachment_id = attachment['id']
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_attachment_update(context, attachment_id,
{'attach_status':
'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
self.message_api.create(
context, defined_messages.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Attaching volume %(volume_id)s to instance '
'%(instance)s at mountpoint %(mount)s on host '
'%(host)s.',
{'volume_id': volume_id, 'instance': instance_uuid,
'mount': mountpoint, 'host': host_name_sanitized},
resource=volume)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_id,
{'attach_status': 'error_attaching'})
volume = self.db.volume_attached(context.elevated(),
attachment_id,
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return self.db.volume_attachment_get(context, attachment_id)
return do_attach()
@locked_detach_operation
def detach_volume(self, context, volume_id, attachment_id=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
volume = self.db.volume_get(context, volume_id)
attachment = None
if attachment_id:
try:
attachment = self.db.volume_attachment_get(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
# We need to make sure the volume status is set to the correct
# status. It could be in detaching status now, and we don't
# want to leave it there.
self.db.volume_detached(context, volume_id, attachment_id)
return
else:
            # We can try to degrade gracefully here by detaching the volume
            # without an attachment_id if the volume only has one
            # attachment. This is for backwards compatibility.
attachments = self.db.volume_attachment_get_all_by_volume_id(
context, volume_id)
if len(attachments) > 1:
# There are more than 1 attachments for this volume
# we have to have an attachment id.
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
# there aren't any attachments for this volume.
# so set the status to available and move on.
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
self.db.volume_update(context, volume_id,
{'status': 'available',
'attach_status': 'detached'})
return
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Detaching volume %(volume_id)s from instance '
'%(instance)s.',
{'volume_id': volume_id,
'instance': attachment.get('instance_uuid')},
resource=volume)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
        # NOTE(jdg): We used to do an ensure_export here to
        # catch upgrades while volumes were attached (E->F).
        # This was necessary to convert in-use volumes from
        # int IDs to UUIDs; we don't need this any longer.
        # We're going to remove the export here
        # (delete the iscsi target).
volume = self.db.volume_get(context, volume_id)
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Detach volume failed, due to "
"uninitialized driver."),
resource=volume)
except Exception as ex:
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
"""Create a new image-volume and cache entry for it.
This assumes that the image has already been downloaded and stored
in the volume described by the volume_ref.
"""
image_volume = None
try:
if not self.image_volume_cache.ensure_space(
ctx,
volume_ref['size'],
volume_ref['host']):
LOG.warning(_LW('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on host %(host)s.'),
{'image': image_id, 'host': volume_ref['host']})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning(_LW('Unable to clone image_volume for image '
'%(image_id)s will not create cache entry.'),
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning(_LW('Failed to create new image-volume cache entry.'
' Error: %(exception)s'), {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume.id)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) -
self._VOLUME_CLONE_SKIP_PROPERTIES}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = 'detached'
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
            LOG.exception(_LE('Create clone_image_volume: %(volume_id)s '
                              'for image %(image_id)s '
                              'failed (Exception: %(except)s)'),
                          {'volume_id': volume.id,
                           'image_id': image_meta['id'],
                           'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume.id,
allow_reschedule=False, volume=image_volume)
image_volume = self.db.volume_get(ctx, image_volume.id)
if image_volume.status != 'available':
raise exception.InvalidVolume(_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.'),
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume.id)
except exception.CinderException:
LOG.exception(_LE('Could not delete the image volume %(id)s.'),
{'id': volume.id})
return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
image_meta):
"""Create a cloned volume and register its location to the image."""
if (image_meta['disk_format'] != 'raw' or
image_meta['container_format'] != 'bare'):
return False
image_volume_context = ctx
if self.driver.configuration.image_upload_use_internal_tenant:
internal_ctx = context.get_internal_tenant_context()
if internal_ctx:
image_volume_context = internal_ctx
image_volume = self._clone_image_volume(image_volume_context,
volume,
image_meta)
if not image_volume:
return False
uri = 'cinder://%s' % image_volume.id
image_registered = None
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception(_LE('Failed to register image volume location '
'%(uri)s.'), {'uri': uri})
if not image_registered:
LOG.warning(_LW('Registration of image volume URI %(uri)s '
'to image %(image_id)s failed.'),
{'uri': uri, 'image_id': image_meta['id']})
try:
self.delete_volume(image_volume_context, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete failed image volume '
'%(id)s.'), {'id': image_volume.id})
return False
image_volume_meta = {'glance_image_id': image_meta['id'],
'image_owner': ctx.project_id}
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
return True
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = self.db.volume_get(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error(_LE("Upload volume to image encountered an error "
"(image-id: %(image_id)s)."),
{'image_id': image_meta['id']},
resource=volume)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
if isinstance(error, exception.ImageLimitExceeded):
self.message_api.create(
context, defined_messages.IMAGE_FROM_VOLUME_OVER_QUOTA,
context.project_id,
resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info(_LI("Copy volume to image completed successfully."),
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume = self.db.volume_get(context, volume_id)
model_update = None
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume, connector)
except exception.CinderException:
err_msg = (_("Create export for volume failed."))
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume = self.db.volume_update(context,
volume_id,
model_update)
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
# Add qos_specs to connection info
typeid = volume['volume_type_id']
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that is designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.get('encryption_key_id'))
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume_ref)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Remove volume export completed successfully."),
resource=volume_ref)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
# NOTE(jdg): Some drivers tie provider info (CHAP) to tenant
# for those that do allow them to return updated model info
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Update volume model for "
"transfer operation failed."),
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume_ref)
return model_update
def _connect_device(self, conn):
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _attach_volume(self, ctxt, volume, properties, remote=False):
status = volume['status']
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
try:
conn = rpcapi.initialize_connection(ctxt, volume, properties)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume %(vol)s."),
{'vol': volume['id']})
self.db.volume_update(ctxt, volume['id'],
{'status': status})
else:
conn = self.initialize_connection(ctxt, volume['id'], properties)
return self._connect_device(conn)
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False):
connector = attach_info['connector']
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Unable to terminate volume connection: '
                                  '%(err)s.'), {'err': err})
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
{'src': src_vol['name'], 'dest': dest_vol['name']})
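        # Rough sequence: attach the destination (then the source), stream
        # the data with a dd-style block copy, and always detach both sides
        # in the finally blocks, forcing the detach if the copy failed.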
properties = utils.brick_get_connector_properties()
dest_remote = remote in ['dest', 'both']
dest_attach_info = self._attach_volume(ctxt, dest_vol, properties,
remote=dest_remote)
try:
src_remote = remote in ['src', 'both']
src_attach_info = self._attach_volume(ctxt, src_vol, properties,
remote=src_remote)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach source volume for copy."))
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, remote=dest_remote)
# Check the backend capabilities of migration destination host.
rpcapi = volume_rpcapi.VolumeAPI()
capabilities = rpcapi.get_capabilities(ctxt, dest_vol['host'],
False)
sparse_copy_volume = bool(capabilities and
capabilities.get('sparse_copy_volume',
False))
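        # If the destination backend advertises sparse_copy_volume, zero
        # blocks can be skipped during the copy so thin-provisioned volumes
        # stay thin on the target.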
copy_error = True
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
vol_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
try:
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, force=copy_error,
remote=dest_remote)
finally:
self._detach_volume(ctxt, src_attach_info, src_vol,
properties, force=copy_error,
remote=src_remote)
def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
skip = self._VOLUME_CLONE_SKIP_PROPERTIES | {'host'}
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
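        # Copy every field except the clone-skip properties and 'host', so
        # the new volume record is created on the destination host instead
        # of inheriting the source's host.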
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
new_volume = objects.Volume(
context=ctxt,
host=host['host'],
status='creating',
attach_status='detached',
migration_status='target:%s' % volume['id'],
**new_vol_values
)
new_volume.create()
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None, allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
# TODO(thangp): Replace get_by_id with refresh when it is available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
tries = 0
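        # Poll until the destination volume becomes available, backing off
        # quadratically (sleep of tries**2 seconds: 1, 4, 9, ...) and
        # bailing out with cleanup on an error status or once the deadline
        # passes.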
while new_volume.status != 'available':
tries += 1
now = time.time()
if new_volume.status == 'error':
msg = _("failed to create new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
time.sleep(tries ** 2)
# TODO(thangp): Replace get_by_id with refresh when it is
# available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
# Copy the source volume to the destination volume
try:
attachments = volume.volume_attachment
if not attachments:
# Pre- and post-copy driver-specific actions
self.driver.before_volume_copy(ctxt, volume, new_volume,
remote='dest')
self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
self.driver.after_volume_copy(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume.id,
new_volume.id,
error=False,
volume=volume,
new_volume=new_volume)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume.id,
new_volume.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE(
"Failed to copy volume %(vol1)s to %(vol2)s"), {
'vol1': volume.id, 'vol2': new_volume.id})
self._clean_temporary_volume(ctxt, volume,
new_volume)
def _clean_temporary_volume(self, ctxt, volume, new_volume,
clean_db_only=False):
        # If we're in the migrating phase, we need to clean up the
        # destination volume because the source volume remains.
if volume.migration_status == 'migrating':
try:
if clean_db_only:
# The temporary volume is not created, only DB data
# is created
new_volume.destroy()
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.delete_volume(ctxt, new_volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume.id})
else:
            # If we're in the completing phase don't delete the
            # destination because we may have already deleted the
            # source! But the migration_status in the database should
            # be cleared so the volume can be handled after a migration
            # failure.
try:
new_volume.migration_status = None
new_volume.save()
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume.id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume.id})
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False, volume=None, new_volume=None):
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None or new_volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
new_volume = objects.Volume.get_by_id(ctxt, new_volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume.id, 'vol2': new_volume.id})
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = volume.previous_status
if error:
LOG.info(_LI("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s"),
{'vol1': volume['id'], 'vol2': new_volume.id})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': 'error',
'status': orig_volume_status}
volume.update(updates)
volume.save()
return volume.id
volume.migration_status = 'completing'
volume.save()
# Detach the source volume (if it fails, don't fail the migration)
try:
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
self.detach_volume(ctxt, volume.id, attachment['id'])
except Exception as ex:
LOG.error(_LE("Detach migration source volume failed: %(err)s"),
{'err': ex}, resource=volume)
        # Give the driver (new_volume) a chance to update things as needed
        # after a successful migration.
        # Note this needs to go through RPC to the host of the new volume;
        # the current host and driver object are for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
volume.refresh()
new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
updated_new = volume.finish_volume_migration(new_volume)
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
rpcapi.attach_volume(ctxt, volume,
attachment['instance_uuid'],
attachment['attached_host'],
attachment['mountpoint'],
'rw')
volume.update(updates)
volume.save()
# Asynchronous deletion of the source volume in the back-end (now
# pointed by the target volume id)
try:
rpcapi.delete_volume(ctxt, updated_new)
except Exception as ex:
LOG.error(_LE('Failed to request async delete of migration source '
'vol %(vol)s: %(err)s'),
{'vol': volume.id, 'err': ex})
LOG.info(_LI("Complete-Migrate volume completed successfully."),
resource=volume)
return volume.id
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
new_type_id=None, volume=None):
"""Migrate the volume to the specified host (called on source host)."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
model_update = None
moved = False
status_update = None
if volume.status in ('retyping', 'maintenance'):
status_update = {'status': volume.previous_status}
volume.migration_status = 'migrating'
volume.save()
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume)
moved, model_update = self.driver.migrate_volume(ctxt,
volume,
host)
if moved:
updates = {'host': host['host'],
'migration_status': 'success',
'previous_status': volume.status}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume.update(updates)
volume.save()
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
if not moved:
try:
self._migrate_volume_generic(ctxt, volume, host,
new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
LOG.info(_LI("Migrate volume completed successfully."),
resource=volume)
@periodic_task.periodic_task
def _report_driver_status(self, context):
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW("Update driver status failed: %(config_group)s "
"is uninitialized."),
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# Append filter and goodness function if needed
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume_id, new_size, reservations,
volume=None):
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.status = 'error_extending'
volume.save()
project_id = volume.project_id
size_increase = (int(new_size)) - volume.size
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except Exception:
LOG.exception(_LE("Extend volume failed."),
resource=volume)
try:
self.db.volume_update(context, volume.id,
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume.id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
QUOTAS.commit(context, reservations, project_id=project_id)
volume.update({'size': int(new_size), 'status': 'available'})
volume.save()
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info(_LI("Extend volume completed successfully."),
resource=volume)
def retype(self, ctxt, volume_id, new_type_id, host,
migration_policy='never', reservations=None,
volume=None, old_reservations=None):
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
context = ctxt.elevated()
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
status_update = {'status': volume.previous_status}
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
                # here? Setting the volume back to its original status
# for now.
volume.update(status_update)
volume.save()
# If old_reservations has been passed in from the API, we should
# skip quotas.
# TODO(ntpttr): These reservation checks are left in to be backwards
# compatible with Liberty and can be removed in N.
if not old_reservations:
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
                # NOTE(wanghao): We don't need to reserve volumes and
                # gigabytes quota for the retyping operation since they
                # didn't change; just reserving volume_type and type
                # gigabytes is fine.
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
volume.update(status_update)
volume.save()
msg = _("Failed to update quota usage while retyping volume.")
LOG.exception(msg, resource=volume)
raise exception.CinderException(msg)
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context, volume.volume_type_id, new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
        # NOTE(jdg): Check to see if the destination host is the same
        # as the current. If it's not, don't call the driver.retype
        # method; otherwise drivers that implement retype may report
        # success, but it's invalid in the case of a migrate.
        # We assume that those that support pools do this internally
        # so we strip off the pools designation.
if (not retyped and
vol_utils.hosts_are_equivalent(self.driver.host,
host['host'])):
try:
new_type = volume_types.get_volume_type(context, new_type_id)
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
if type(ret) == tuple:
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume.id)
except Exception:
retyped = False
LOG.exception(_LE("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism."), volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume.id, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info(_LI("Retype volume completed successfully."),
resource=volume)
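    # Illustrative sketch (not part of the original module): the quota
    # bookkeeping used by retype() above. The old volume type gives back its
    # per-type counters (the generic 'volumes'/'gigabytes' counters do not
    # change), and both reservation sets are committed once the retype
    # succeeds. The helper name is hypothetical.
    def _sketch_retype_quota_flow(self, context, volume, new_reservations,
                                  project_id):
        # Build negative per-type deltas against the old volume type only.
        reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
        QUOTAS.add_volume_type_opts(context, reserve_opts,
                                    volume.volume_type_id)
        reserve_opts.pop('volumes')
        reserve_opts.pop('gigabytes')
        old_reservations = QUOTAS.reserve(context, project_id=project_id,
                                          **reserve_opts)
        # Committing both moves per-type usage from the old type to the new.
        QUOTAS.commit(context, old_reservations, project_id=project_id)
        QUOTAS.commit(context, new_reservations, project_id=project_id)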
def manage_existing(self, ctxt, volume_id, ref=None):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume_id,
ref)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
# Update volume stats
pool = vol_utils.extract_host(vol_ref['host'], 'pool')
if pool is None:
            # Legacy volume, put it into the default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol_ref['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= vol_ref['size']
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol_ref['size'])
LOG.info(_LI("Manage existing volume completed successfully."),
resource=vol_ref)
return vol_ref['id']
def promote_replica(self, ctxt, volume_id):
"""Promote volume replica secondary to be the primary volume."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Promote volume replica failed."),
resource=volume)
try:
model_update = self.driver.promote_replica(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error promoting secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
LOG.info(_LI("Promote volume replica completed successfully."),
resource=volume)
def reenable_replication(self, ctxt, volume_id):
"""Re-enable replication of secondary volume with primary volumes."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Sync volume replica failed."),
resource=volume)
try:
model_update = self.driver.reenable_replication(ctxt, volume)
except exception.CinderException:
err_msg = (_("Synchronizing secondary volume to primary failed."))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def _update_replication_relationship_status(self, ctxt):
# Only want volumes that do not have a 'disabled' replication status
filters = {'replication_status': ['active', 'copying', 'error',
'active-stopped', 'inactive']}
volumes = self.db.volume_get_all_by_host(ctxt, self.host,
filters=filters)
for vol in volumes:
model_update = None
try:
model_update = self.driver.get_replication_status(
ctxt, vol)
if model_update:
self.db.volume_update(ctxt, vol['id'], model_update)
except Exception:
LOG.exception(_LE("Get replication status for volume failed."),
resource=vol)
def create_consistencygroup(self, context, group):
"""Creates the consistency group."""
context = context.elevated()
status = fields.ConsistencyGroupStatus.AVAILABLE
model_update = None
self._notify_about_consistencygroup_usage(
context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %s: creating"), group.name)
model_update = self.driver.create_consistencygroup(context,
group)
if model_update:
if (model_update['status'] ==
fields.ConsistencyGroupStatus.ERROR):
msg = (_('Create consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.ConsistencyGroupStatus.ERROR
group.save()
LOG.error(_LE("Consistency group %s: create failed"),
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info(_LI("Consistency group %s: created successfully"),
group.name)
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
return group
def create_consistencygroup_from_src(self, context, group,
cgsnapshot=None, source_cg=None):
"""Creates the consistency group from source.
The source can be a CG snapshot or a source CG.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = self.db.volume_get_all_by_group(context, group.id)
if cgsnapshot:
try:
# Check if cgsnapshot still exists
cgsnapshot = objects.CGSnapshot.get_by_id(
context, cgsnapshot.id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Create consistency group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': cgsnapshot.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("snapshot-%s") % cgsnapshot.id
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
if source_cg:
try:
source_cg = objects.ConsistencyGroup.get_by_id(
context, source_cg.id)
except exception.ConsistencyGroupNotFound:
LOG.error(_LE("Create consistency group "
"from source cg-%(cg)s failed: "
"ConsistencyGroupNotFound."),
{'cg': source_cg.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("cg-%s") % source_cg.id
source_vols = self.db.volume_get_all_by_group(
context, source_cg.id)
for source_vol in source_vols:
if (source_vol['status'] not in
VALID_CREATE_CG_SRC_CG_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol['id'],
'valid': VALID_CREATE_CG_SRC_CG_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if cgsnapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_cg and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_consistencygroup_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create consistency group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
return group
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
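    # Illustrative sketch (not part of the original module): what the
    # matching in _sort_snapshots() produces for plain dicts. Each target
    # volume was created from the snapshot named by its 'snapshot_id', so
    # the result follows the order of the target volumes. The method name
    # and sample IDs are made up.
    def _sketch_sort_snapshots_example(self):
        volumes = [{'id': 'v1', 'snapshot_id': 's2'},
                   {'id': 'v2', 'snapshot_id': 's1'}]
        snapshots = [{'id': 's1'}, {'id': 's2'}]
        ordered = [snap for vol in volumes for snap in snapshots
                   if snap['id'] == vol['snapshot_id']]
        # ordered == [{'id': 's2'}, {'id': 's1'}]
        return ordered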
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error(_LE("Source volumes cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
try:
snapshot_id = vol.get('snapshot_id')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
            # Legacy volume, put it into the default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
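    # Illustrative sketch (not part of the original module): the pool
    # fall-back used by _update_allocated_capacity() above. extract_host()
    # returns the '#pool' suffix when present; legacy hosts without one fall
    # back to the backend name or, failing that, to extract_host() with its
    # default-pool flag. The helper name is hypothetical.
    def _sketch_pool_for_stats(self, host):
        pool = vol_utils.extract_host(host, 'pool')
        if pool is None:
            pool = (self.driver.configuration.safe_get('volume_backend_name')
                    or vol_utils.extract_host(host, 'pool', True))
        return pool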
def delete_consistencygroup(self, context, group):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
        if context.project_id != group.project_id:
            project_id = group.project_id
        else:
            project_id = context.project_id
volumes = self.db.volume_get_all_by_group(context, group.id)
for volume_ref in volumes:
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_ref['id'])
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
if volume_ref['host']:
new_host = vol_utils.extract_host(volume_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node"))
self._notify_about_consistencygroup_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, group, volumes))
if volumes_model_update:
for volume in volumes_model_update:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
for volume_ref in volumes:
# Get reservations for volume
try:
volume_id = volume_ref['id']
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume_ref['size']
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
group.destroy()
self._notify_about_consistencygroup_usage(
context, group, "delete.end", volumes)
self.publish_service_capabilities(context)
LOG.info(_LI("Delete consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': add_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'status': add_vol_ref['status'],
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(add_vol_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': remove_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref['id'],
'group_id': group.id,
'status': remove_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in (
[fields.ConsistencyGroupStatus.ERROR]):
msg = (_('Error occurred when updating consistency group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
group.status = 'available'
        group.updated_at = now
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group.id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
self._notify_about_consistencygroup_usage(
context, group, "update.end")
LOG.info(_LI("Update consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
self.db.snapshot_update(context,
snap_model['id'],
snap_model)
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
# TODO(thangp): Switch over to use snapshot.update()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_update(
context, snapshot_id, {
'status': fields.SnapshotStatus.ERROR})
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'],
{'status': fields.SnapshotStatus.AVAILABLE,
'progress': '100%'})
cgsnapshot.status = 'available'
cgsnapshot.save()
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.end")
return cgsnapshot
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
project_id = cgsnapshot.project_id
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap.status = snap_model['status']
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
# TODO(thangp): Switch over to use snapshot.destroy()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
cgsnapshot.destroy()
LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id)
self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
"""Finalize migration process on backend device."""
model_update = None
model_update_default = {'_name_id': new_volume.name_id,
'provider_location':
new_volume.provider_location}
try:
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume,
volume_status)
except NotImplementedError:
# If update_migrated_volume is not implemented for the driver,
# _name_id and provider_location will be set with the values
# from new_volume.
model_update = model_update_default
if model_update:
model_update_default.update(model_update)
# Swap keys that were changed in the source so we keep their values
# in the temporary volume's DB record.
        # Need to convert 'metadata' and 'admin_metadata' since they are not
        # keys of volume; their corresponding keys are 'volume_metadata' and
        # 'volume_admin_metadata'.
model_update_new = dict()
for key in model_update:
if key == 'metadata':
if volume.get('volume_metadata'):
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_metadata}
elif key == 'admin_metadata':
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_admin_metadata}
else:
model_update_new[key] = volume[key]
with new_volume.obj_as_admin():
new_volume.update(model_update_new)
new_volume.save()
with volume.obj_as_admin():
volume.update(model_update_default)
volume.save()
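    # Illustrative sketch (not part of the original module): the metadata
    # conversion done in update_migrated_volume() above. Volume metadata is
    # stored as rows of {'key': ..., 'value': ...}, while a model update
    # expects a flat mapping, so the rows are collapsed with a dict
    # comprehension. The helper name is hypothetical.
    def _sketch_flatten_metadata(self, volume_metadata):
        # e.g. [{'key': 'tier', 'value': 'gold'}] -> {'tier': 'gold'}
        return {item['key']: item['value'] for item in volume_metadata}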
# Replication V2.1 methods
def failover_host(self, context,
secondary_backend_id=None):
"""Failover a backend to a secondary replication target.
Instructs a replication capable/configured backend to failover
to one of it's secondary replication targets. host=None is
an acceptable input, and leaves it to the driver to failover
to the only configured target, or to choose a target on it's
own. All of the hosts volumes will be passed on to the driver
in order for it to determine the replicated volumes on the host,
if needed.
:param context: security context
:param secondary_backend_id: Specifies backend_id to fail over to
"""
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
volumes = objects.VolumeList.get_all_by_host(context, self.host)
exception_encountered = False
try:
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
(active_backend_id, volume_update_list) = (
self.driver.failover_host(
context,
volumes,
secondary_id=secondary_backend_id))
except exception.UnableToFailOver:
LOG.exception(_LE("Failed to perform replication failover"))
service.replication_status = (
fields.ReplicationStatus.FAILOVER_ERROR)
service.save()
exception_encountered = True
except exception.InvalidReplicationTarget:
LOG.exception(_LE("Invalid replication target specified "
"for failover"))
# Preserve the replication_status
if secondary_backend_id == "default":
service.replication_status = (
fields.ReplicationStatus.FAILED_OVER)
else:
service.replication_status = fields.ReplicationStatus.ENABLED
service.save()
exception_encountered = True
except exception.VolumeDriverException:
# NOTE(jdg): Drivers need to be aware if they fail during
# a failover sequence, we're expecting them to cleanup
# and make sure the driver state is such that the original
# backend is still set as primary as per driver memory
LOG.error(_LE("Driver reported error during "
"replication failover."))
service.status = 'error'
service.save()
exception_encountered = True
if exception_encountered:
LOG.error(
_LE("Error encountered during failover on host: "
"%(host)s invalid target ID %(backend_id)s"),
{'host': self.host, 'backend_id':
secondary_backend_id})
return
if secondary_backend_id == "default":
service.replication_status = fields.ReplicationStatus.ENABLED
service.active_backend_id = ""
if service.frozen:
service.disabled = True
service.disabled_reason = "frozen"
else:
service.disabled = False
service.disabled_reason = ""
service.save()
else:
service.replication_status = fields.ReplicationStatus.FAILED_OVER
service.active_backend_id = active_backend_id
service.disabled = True
service.disabled_reason = "failed-over"
service.save()
for update in volume_update_list:
# Response must include an id key: {volume_id: <cinder-uuid>}
if not update.get('volume_id'):
raise exception.UnableToFailOver(
reason=_("Update list, doesn't include volume_id"))
# Key things to consider (attaching failed-over volumes):
# provider_location
# provider_auth
# provider_id
# replication_status
vobj = objects.Volume.get_by_id(context, update['volume_id'])
vobj.update(update.get('updates', {}))
vobj.save()
LOG.info(_LI("Failed over to replication target successfully."))
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really
# need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning(_LW('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.'))
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info(_LI("Set backend status to frozen successfully."))
return True
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters, if this call
# to the backend fails, we're stuck and can't re-enable
LOG.error(_LE('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.'))
return False
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info(_LI("Thawed backend successfully."))
return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
msg = _LE("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.")
LOG.exception(msg, {'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
def get_backup_device(self, ctxt, backup):
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
return backup_device_dict
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
|
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
| 362 | 377 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder import compute
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver',
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver',
'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver':
'cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver',
'cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver':
'cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver',
'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver':
'cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver',
'cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver':
'cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver',
}
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
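# Illustrative sketch (not part of the original module): how the decorator
# above is applied. The function below is hypothetical; invoking it first
# takes an external lock named "<volume_id>-_example_volume_operation", so
# concurrent operations on the same volume are serialized.
@locked_volume_operation
def _example_volume_operation(inst, context, volume_id, **kwargs):
    # Body only runs once the per-volume named lock has been acquired.
    return volume_id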
def locked_detach_operation(f):
"""Lock decorator for volume detach operations.
Takes a named lock prior to executing the detach call. The lock is named
with the operation executed and the id of the volume. This lock can then
be used by other operations to avoid operation conflicts on shared volumes.
This locking mechanism is only for detach calls. We can't use the
locked_volume_operation, because detach requires an additional
attachment_id in the parameter list.
"""
def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def ldo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
return ldo_inner1
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
"""
def lso_inner1(inst, context, snapshot, **kwargs):
@utils.synchronized("%s-%s" % (snapshot.id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot, **kwargs)
return lso_inner1
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '2.0'
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment'}
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
            # if it's not using multi-backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
# We pass the current setting for service.active_backend_id to
        # the driver on init, in case there was a restart or something
curr_active_backend_id = None
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
# NOTE(jdg): This is to solve problems with unit tests
LOG.info(_LI("Service not found for updating "
"active_backend_id, assuming default "
"for driver init."))
else:
curr_active_backend_id = service.active_backend_id
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning(_LW("Suppressing requests library SSL Warnings"))
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id)
self.message_api = message_api.API()
if CONF.profiler.enabled and profiler is not None:
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
if self.driver.configuration.safe_get(
'image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get(
'image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get(
'image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(
self.db,
cinder_volume.API(),
max_cache_size,
max_cache_entries
)
LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
{'host': self.host})
else:
LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
{'host': self.host})
self.image_volume_cache = None
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
            # No pool name encoded in host, so this is a legacy
            # volume created before pools were introduced. Ask the
            # driver to provide pool info if it has such knowledge
            # and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception(_LE('Fetch volume pool name failed.'),
resource=volume)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
            # Otherwise, put them into a special fixed pool with
            # volume_backend_name being the pool name; if
            # volume_backend_name is None, use the default pool name.
            # This is only for counting purposes and doesn't update the DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
        # NOTE(jdg): For now this just updates provider_id; we can add more
        # items to the update if they're relevant, but we need to be safe in
        # what we allow and add a list of allowed keys. Things that make
        # sense are provider_*, replication_status, etc.
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
                # NOTE(JDG): Make sure returned item is in this host's volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']][0])
if update:
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
        # NOTE(jdg): snapshots are slightly harder, because
# we do not have a host column and of course no get
# all by host, so we use a get_all and bounce our
# response off of it
if snapshot_updates:
cinder_snaps = self.db.snapshot_get_all(ctxt)
for snap in cinder_snaps:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
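    # Illustrative sketch (not part of the original module): the list format
    # _sync_provider_info() expects back from update_provider_info() and how
    # a single entry is matched against a volume on this host. The IDs are
    # made up and the method name is hypothetical.
    def _sketch_provider_update_format(self):
        updates = [{'id': '11111111-1111-1111-1111-111111111111',
                    'provider_id': 'backend-lun-42'}]
        volume = {'id': '11111111-1111-1111-1111-111111111111'}
        matched = [updt for updt in updates if updt['id'] == volume['id']]
        # matched[0]['provider_id'] is what gets written back to the DB.
        return matched[0] if matched else None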
def init_host(self):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
# Initialize backend capabilities list
self.driver.init_capabilities()
volumes = objects.VolumeList.get_all_by_host(ctxt, self.host)
snapshots = self.db.snapshot_get_by_host(ctxt, self.host)
self._sync_provider_info(ctxt, volumes, snapshots)
# FIXME volume count for exporting is wrong
try:
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
for volume in volumes:
# available volume should also be counted into allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception:
LOG.exception(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
volume.status = 'error'
volume.save()
elif volume['status'] in ('downloading', 'creating'):
LOG.warning(_LW("Detected volume stuck "
"in %(curr_status)s "
"status, setting to ERROR."),
{'curr_status': volume['status']},
resource=volume)
if volume['status'] == 'downloading':
self.driver.clear_download(ctxt, volume)
volume.status = 'error'
volume.save()
elif volume.status == 'uploading':
# Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment(
ctxt, volume.id)
else:
pass
snapshots = objects.SnapshotList.get_by_host(
ctxt, self.host, {'status': fields.SnapshotStatus.CREATING})
for snapshot in snapshots:
LOG.warning(_LW("Detected snapshot stuck in creating "
"status, setting to ERROR."), resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
resource=volume)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
# NOTE(jdg): Careful though because that doesn't mean
# that an entry exists in the service table
self.driver.set_initialized()
for volume in volumes:
if volume['status'] == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
volume['id'], volume=volume)
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume['id'], volume=volume)
LOG.info(_LI("Resume volume delete completed successfully."),
resource=volume)
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
LOG.info(_LI("Driver initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def init_host_with_rpc(self):
LOG.info(_LI("Initializing RPC dependent components of volume "
"driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
stats = self.driver.get_volume_stats(refresh=True)
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Service not found for updating "
"replication_status."))
if service.replication_status != (
fields.ReplicationStatus.FAILED_OVER):
if stats and stats.get('replication_enabled', False):
service.replication_status = fields.ReplicationStatus.ENABLED
else:
service.replication_status = fields.ReplicationStatus.DISABLED
service.save()
LOG.info(_LI("Driver post RPC initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def is_working(self):
"""Return if Manager is ready to accept requests.
This is to inform Service class that in case of volume driver
initialization failure the manager is actually down and not ready to
accept any requests.
"""
return self.driver.initialized
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
volume=None):
"""Creates the volume."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
if request_spec is None:
request_spec = {}
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume.id,
allow_reschedule,
context,
request_spec,
filter_properties,
image_volume_cache=self.image_volume_cache,
)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
source_replicaid = request_spec.get('source_replicaid')
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
def _run_flow():
# This code executes create volume flow. If something goes wrong,
# flow reverts all job that was done and reraises an exception.
# Otherwise, all data that was generated by flow becomes available
# in flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
@utils.synchronized(locked_action, external=True)
def _run_flow_locked():
_run_flow()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
vol_ref = None
try:
if locked_action is None:
_run_flow()
else:
_run_flow_locked()
finally:
try:
vol_ref = flow_engine.storage.fetch('volume_ref')
except tfe.NotFound:
                # If there's no vol_ref, then the flow was reverted. Let's
                # check whether rescheduling occurred.
try:
rescheduled = flow_engine.storage.get_revert_result(
create_volume.OnFailureRescheduleTask.make_name(
[create_volume.ACTION]))
except tfe.NotFound:
pass
if not rescheduled:
if not vol_ref:
# Flow was reverted and not rescheduled, fetching
# volume_ref from the DB, because it will be needed.
vol_ref = objects.Volume.get_by_id(context, volume.id)
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(vol_ref)
LOG.info(_LI("Created volume successfully."), resource=vol_ref)
return vol_ref.id
@locked_volume_operation
def delete_volume(self, context, volume_id,
unmanage_only=False,
volume=None,
cascade=False):
"""Deletes and unexports volume.
        1. Delete a volume (normal case)
Delete a volume and update quotas.
2. Delete a migration volume
If deleting the volume in a migration, we want to skip
quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
volume = objects.Volume.get_by_id(context, volume_id)
else:
volume.refresh()
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.debug("Attempted delete of non-existent volume: %s", volume_id)
return
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if vol_utils.extract_host(volume.host) != self.host:
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
if unmanage_only and cascade:
# This could be done, but is ruled out for now just
# for simplicity.
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
is_migrating = volume.migration_status not in (None, 'error',
'success')
is_migrating_dest = (is_migrating and
volume.migration_status.startswith(
'target:'))
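        # (A migration destination volume carries a migration_status of the
        # form 'target:<source volume id>'; see _migrate_volume_generic.)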
self._notify_about_volume_usage(context, volume, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != 'deleting':
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
LOG.error(_LE("Unable to delete busy volume."),
resource=volume)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'available')
return
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume."),
resource=volume)
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
volume.destroy()
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
self._notify_about_volume_usage(context, volume, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
                # Legacy volume, put it into the default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
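            # (Volume host strings have the form 'host@backend#pool'; legacy
            # volumes created before pool support lack the '#pool' suffix,
            # which is why extract_host can return None above.)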
size = volume.size
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
LOG.info(_LI("Deleted volume successfully."), resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
def create_snapshot(self, context, volume_id, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save()
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, volume_id)
except exception.GlanceMetadataNotFound:
                # If the volume was not created from an image, no glance
                # metadata will be available for it in the volume glance
                # metadata table.
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating snapshot"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id}, resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info(_LI("Create snapshot completed successfully"),
resource=snapshot)
return snapshot.id
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot, unmanage_only=False):
"""Deletes and unexports snapshot."""
context = context.elevated()
snapshot._context = context
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
if unmanage_only:
self.driver.unmanage_snapshot(snapshot)
else:
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
resource=snapshot)
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
return
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Update snapshot usages failed."),
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy()
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
LOG.info(_LI("Delete snapshot completed successfully"),
resource=snapshot)
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached."""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
raise exception.InvalidVolume(
reason=_("volume is already attached"))
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachments = \
self.db.volume_attachment_get_all_by_instance_uuid(
context, volume_id, instance_uuid)
else:
attachments = (
self.db.volume_attachment_get_all_by_host(
context,
volume_id,
host_name_sanitized))
if attachments:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
return
self._notify_about_volume_usage(context, volume,
"attach.start")
values = {'volume_id': volume_id,
'attach_status': 'attaching', }
attachment = self.db.volume_attach(context.elevated(), values)
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(), volume_id,
{"attached_mode": mode}, False)
attachment_id = attachment['id']
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_attachment_update(context, attachment_id,
{'attach_status':
'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
self.message_api.create(
context, defined_messages.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Attaching volume %(volume_id)s to instance '
'%(instance)s at mountpoint %(mount)s on host '
'%(host)s.',
{'volume_id': volume_id, 'instance': instance_uuid,
'mount': mountpoint, 'host': host_name_sanitized},
resource=volume)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_id,
{'attach_status': 'error_attaching'})
volume = self.db.volume_attached(context.elevated(),
attachment_id,
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return self.db.volume_attachment_get(context, attachment_id)
return do_attach()
@locked_detach_operation
def detach_volume(self, context, volume_id, attachment_id=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
volume = self.db.volume_get(context, volume_id)
attachment = None
if attachment_id:
try:
attachment = self.db.volume_attachment_get(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
# We need to make sure the volume status is set to the correct
# status. It could be in detaching status now, and we don't
# want to leave it there.
self.db.volume_detached(context, volume_id, attachment_id)
return
else:
            # We can try to degrade gracefully here by detaching the volume
            # without an attachment_id if the volume only has one attachment.
            # This is for backwards compatibility.
attachments = self.db.volume_attachment_get_all_by_volume_id(
context, volume_id)
if len(attachments) > 1:
                # There is more than one attachment for this volume,
                # so we have to have an attachment_id.
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
                # There aren't any attachments for this volume,
                # so set the status to available and move on.
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
self.db.volume_update(context, volume_id,
{'status': 'available',
'attach_status': 'detached'})
return
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Detaching volume %(volume_id)s from instance '
'%(instance)s.',
{'volume_id': volume_id,
'instance': attachment.get('instance_uuid')},
resource=volume)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
        # NOTE(jdg): We used to do an ensure_export here to
        # catch upgrades while volumes were attached (E->F);
        # this was necessary to convert in-use volumes from
        # int IDs to UUIDs. We don't need this any longer, so
        # we're just going to remove the export here
        # (delete the iscsi target).
volume = self.db.volume_get(context, volume_id)
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Detach volume failed, due to "
"uninitialized driver."),
resource=volume)
except Exception as ex:
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
"""Create a new image-volume and cache entry for it.
This assumes that the image has already been downloaded and stored
in the volume described by the volume_ref.
"""
image_volume = None
try:
if not self.image_volume_cache.ensure_space(
ctx,
volume_ref['size'],
volume_ref['host']):
LOG.warning(_LW('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on host %(host)s.'),
{'image': image_id, 'host': volume_ref['host']})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning(_LW('Unable to clone image_volume for image '
'%(image_id)s will not create cache entry.'),
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning(_LW('Failed to create new image-volume cache entry.'
' Error: %(exception)s'), {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume.id)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) -
self._VOLUME_CLONE_SKIP_PROPERTIES}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = 'detached'
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
            LOG.exception(_LE('Create clone_image_volume: %(volume_id)s '
                              'for image %(image_id)s '
                              'failed (Exception: %(except)s)'),
{'volume_id': volume.id,
'image_id': image_meta['id'],
'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume.id,
allow_reschedule=False, volume=image_volume)
image_volume = self.db.volume_get(ctx, image_volume.id)
if image_volume.status != 'available':
                raise exception.InvalidVolume(
                    reason=_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.'),
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume.id)
except exception.CinderException:
LOG.exception(_LE('Could not delete the image volume %(id)s.'),
                              {'id': image_volume.id})
return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
image_meta):
"""Create a cloned volume and register its location to the image."""
if (image_meta['disk_format'] != 'raw' or
image_meta['container_format'] != 'bare'):
return False
image_volume_context = ctx
if self.driver.configuration.image_upload_use_internal_tenant:
internal_ctx = context.get_internal_tenant_context()
if internal_ctx:
image_volume_context = internal_ctx
image_volume = self._clone_image_volume(image_volume_context,
volume,
image_meta)
if not image_volume:
return False
uri = 'cinder://%s' % image_volume.id
image_registered = None
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception(_LE('Failed to register image volume location '
'%(uri)s.'), {'uri': uri})
if not image_registered:
LOG.warning(_LW('Registration of image volume URI %(uri)s '
'to image %(image_id)s failed.'),
{'uri': uri, 'image_id': image_meta['id']})
try:
                self.delete_volume(image_volume_context, image_volume.id)
except exception.CinderException:
LOG.exception(_LE('Could not delete failed image volume '
'%(id)s.'), {'id': image_volume.id})
return False
image_volume_meta = {'glance_image_id': image_meta['id'],
'image_owner': ctx.project_id}
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
return True
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = self.db.volume_get(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error(_LE("Upload volume to image encountered an error "
"(image-id: %(image_id)s)."),
{'image_id': image_meta['id']},
resource=volume)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
if isinstance(error, exception.ImageLimitExceeded):
self.message_api.create(
context, defined_messages.IMAGE_FROM_VOLUME_OVER_QUOTA,
context.project_id,
resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info(_LI("Copy volume to image completed successfully."),
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
            if image_status in ('queued', 'saving'):
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
        The driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume = self.db.volume_get(context, volume_id)
model_update = None
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume, connector)
except exception.CinderException:
err_msg = (_("Create export for volume failed."))
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume = self.db.volume_update(context,
volume_id,
model_update)
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
# Add qos_specs to connection info
typeid = volume['volume_type_id']
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
            # Only pass qos_specs that are designated to be consumed by
            # the front-end, or by both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
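        # For example, conn_info['data']['qos_specs'] might end up as
        # {'total_iops_sec': '500'} (illustrative key/value only); it is None
        # when the volume type has no front-end QoS associated with it.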
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.get('encryption_key_id'))
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume_ref)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Remove volume export completed successfully."),
resource=volume_ref)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
        # NOTE(jdg): Some drivers tie provider info (CHAP) to the tenant;
        # for those that do, allow them to return updated model info.
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Update volume model for "
"transfer operation failed."),
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume_ref)
return model_update
def _connect_device(self, conn):
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _attach_volume(self, ctxt, volume, properties, remote=False):
status = volume['status']
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
try:
conn = rpcapi.initialize_connection(ctxt, volume, properties)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume %(vol)s."),
{'vol': volume['id']})
self.db.volume_update(ctxt, volume['id'],
{'status': status})
else:
conn = self.initialize_connection(ctxt, volume['id'], properties)
return self._connect_device(conn)
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False):
connector = attach_info['connector']
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Unable to terminate volume connection: '
                                  '%(err)s.'), {'err': err})
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
{'src': src_vol['name'], 'dest': dest_vol['name']})
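        # 'remote' may be None, 'src', 'dest' or 'both'; it selects which
        # side(s) are attached through the volume RPC API on their owning
        # host instead of through the local driver (see _attach_volume).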
properties = utils.brick_get_connector_properties()
dest_remote = remote in ['dest', 'both']
dest_attach_info = self._attach_volume(ctxt, dest_vol, properties,
remote=dest_remote)
try:
src_remote = remote in ['src', 'both']
src_attach_info = self._attach_volume(ctxt, src_vol, properties,
remote=src_remote)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach source volume for copy."))
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, remote=dest_remote)
# Check the backend capabilities of migration destination host.
rpcapi = volume_rpcapi.VolumeAPI()
capabilities = rpcapi.get_capabilities(ctxt, dest_vol['host'],
False)
sparse_copy_volume = bool(capabilities and
capabilities.get('sparse_copy_volume',
False))
copy_error = True
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
vol_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
try:
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, force=copy_error,
remote=dest_remote)
finally:
self._detach_volume(ctxt, src_attach_info, src_vol,
properties, force=copy_error,
remote=src_remote)
def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
skip = self._VOLUME_CLONE_SKIP_PROPERTIES | {'host'}
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
new_volume = objects.Volume(
context=ctxt,
host=host['host'],
status='creating',
attach_status='detached',
migration_status='target:%s' % volume['id'],
**new_vol_values
)
new_volume.create()
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None, allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
# TODO(thangp): Replace get_by_id with refresh when it is available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
tries = 0
while new_volume.status != 'available':
tries += 1
now = time.time()
if new_volume.status == 'error':
msg = _("failed to create new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
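                # Back off quadratically (1s, 4s, 9s, ...) between polls.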
time.sleep(tries ** 2)
# TODO(thangp): Replace get_by_id with refresh when it is
# available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
# Copy the source volume to the destination volume
try:
attachments = volume.volume_attachment
if not attachments:
# Pre- and post-copy driver-specific actions
self.driver.before_volume_copy(ctxt, volume, new_volume,
remote='dest')
self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
self.driver.after_volume_copy(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume.id,
new_volume.id,
error=False,
volume=volume,
new_volume=new_volume)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume.id,
new_volume.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE(
"Failed to copy volume %(vol1)s to %(vol2)s"), {
'vol1': volume.id, 'vol2': new_volume.id})
self._clean_temporary_volume(ctxt, volume,
new_volume)
def _clean_temporary_volume(self, ctxt, volume, new_volume,
clean_db_only=False):
        # If we're in the migrating phase, we need to clean up the
        # destination volume because the source volume remains.
if volume.migration_status == 'migrating':
try:
if clean_db_only:
# The temporary volume is not created, only DB data
# is created
new_volume.destroy()
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.delete_volume(ctxt, new_volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume.id})
else:
            # If we're in the completing phase, don't delete the
            # destination because we may have already deleted the
            # source! But the migration_status in the database should
            # be cleared so the volume can be handled after the
            # migration failure.
try:
new_volume.migration_status = None
new_volume.save()
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume.id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume.id})
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False, volume=None, new_volume=None):
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None or new_volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
new_volume = objects.Volume.get_by_id(ctxt, new_volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume.id, 'vol2': new_volume.id})
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = volume.previous_status
if error:
LOG.info(_LI("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s"),
{'vol1': volume['id'], 'vol2': new_volume.id})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': 'error',
'status': orig_volume_status}
volume.update(updates)
volume.save()
return volume.id
volume.migration_status = 'completing'
volume.save()
# Detach the source volume (if it fails, don't fail the migration)
try:
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
self.detach_volume(ctxt, volume.id, attachment['id'])
except Exception as ex:
LOG.error(_LE("Detach migration source volume failed: %(err)s"),
{'err': ex}, resource=volume)
# Give driver (new_volume) a chance to update things as needed
# after a successful migration.
        # Note this needs to go through rpc to the host of the new volume;
        # the current host and driver object are for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
volume.refresh()
new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
updated_new = volume.finish_volume_migration(new_volume)
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
rpcapi.attach_volume(ctxt, volume,
attachment['instance_uuid'],
attachment['attached_host'],
attachment['mountpoint'],
'rw')
volume.update(updates)
volume.save()
# Asynchronous deletion of the source volume in the back-end (now
        # pointed to by the target volume id).
try:
rpcapi.delete_volume(ctxt, updated_new)
except Exception as ex:
LOG.error(_LE('Failed to request async delete of migration source '
'vol %(vol)s: %(err)s'),
{'vol': volume.id, 'err': ex})
LOG.info(_LI("Complete-Migrate volume completed successfully."),
resource=volume)
return volume.id
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
new_type_id=None, volume=None):
"""Migrate the volume to the specified host (called on source host)."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
model_update = None
moved = False
status_update = None
if volume.status in ('retyping', 'maintenance'):
status_update = {'status': volume.previous_status}
volume.migration_status = 'migrating'
volume.save()
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume)
moved, model_update = self.driver.migrate_volume(ctxt,
volume,
host)
if moved:
updates = {'host': host['host'],
'migration_status': 'success',
'previous_status': volume.status}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume.update(updates)
volume.save()
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
if not moved:
try:
self._migrate_volume_generic(ctxt, volume, host,
new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
LOG.info(_LI("Migrate volume completed successfully."),
resource=volume)
@periodic_task.periodic_task
def _report_driver_status(self, context):
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW("Update driver status failed: %(config_group)s "
"is uninitialized."),
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# Append filter and goodness function if needed
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume_id, new_size, reservations,
volume=None):
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.status = 'error_extending'
volume.save()
project_id = volume.project_id
size_increase = (int(new_size)) - volume.size
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except Exception:
LOG.exception(_LE("Extend volume failed."),
resource=volume)
try:
self.db.volume_update(context, volume.id,
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume.id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
QUOTAS.commit(context, reservations, project_id=project_id)
volume.update({'size': int(new_size), 'status': 'available'})
volume.save()
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
            # Legacy volume, put it into the default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info(_LI("Extend volume completed successfully."),
resource=volume)
def retype(self, ctxt, volume_id, new_type_id, host,
migration_policy='never', reservations=None,
volume=None, old_reservations=None):
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
context = ctxt.elevated()
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
status_update = {'status': volume.previous_status}
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
                # here? Setting the volume back to its original status
# for now.
volume.update(status_update)
volume.save()
# If old_reservations has been passed in from the API, we should
# skip quotas.
# TODO(ntpttr): These reservation checks are left in to be backwards
# compatible with Liberty and can be removed in N.
if not old_reservations:
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
            # NOTE(wanghao): We don't need to reserve volumes and gigabytes
            # quota for the retype operation since they don't change;
            # reserving only the per-volume-type quotas is fine.
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
volume.update(status_update)
volume.save()
msg = _("Failed to update quota usage while retyping volume.")
LOG.exception(msg, resource=volume)
raise exception.CinderException(msg)
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context, volume.volume_type_id, new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
        # NOTE(jdg): Check to see if the destination host is the same
        # as the current one. If it's not, don't call the driver.retype
        # method, since drivers that implement retype may report
        # success, but it's invalid in the case of a migrate.
        # We assume that those that support pools do this internally,
        # so we strip off the pool designation.
if (not retyped and
vol_utils.hosts_are_equivalent(self.driver.host,
host['host'])):
try:
new_type = volume_types.get_volume_type(context, new_type_id)
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
                if isinstance(ret, tuple):
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume.id)
except Exception:
retyped = False
LOG.exception(_LE("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism."), volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume.id, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info(_LI("Retype volume completed successfully."),
resource=volume)
def manage_existing(self, ctxt, volume_id, ref=None):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume_id,
ref)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
# Update volume stats
pool = vol_utils.extract_host(vol_ref['host'], 'pool')
if pool is None:
            # Legacy volume, put it into the default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol_ref['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= vol_ref['size']
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol_ref['size'])
LOG.info(_LI("Manage existing volume completed successfully."),
resource=vol_ref)
return vol_ref['id']
def promote_replica(self, ctxt, volume_id):
"""Promote volume replica secondary to be the primary volume."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Promote volume replica failed."),
resource=volume)
try:
model_update = self.driver.promote_replica(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error promoting secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
LOG.info(_LI("Promote volume replica completed successfully."),
resource=volume)
def reenable_replication(self, ctxt, volume_id):
"""Re-enable replication of secondary volume with primary volumes."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Sync volume replica failed."),
resource=volume)
try:
model_update = self.driver.reenable_replication(ctxt, volume)
except exception.CinderException:
err_msg = (_("Synchronizing secondary volume to primary failed."))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def _update_replication_relationship_status(self, ctxt):
# Only want volumes that do not have a 'disabled' replication status
filters = {'replication_status': ['active', 'copying', 'error',
'active-stopped', 'inactive']}
volumes = self.db.volume_get_all_by_host(ctxt, self.host,
filters=filters)
for vol in volumes:
model_update = None
try:
model_update = self.driver.get_replication_status(
ctxt, vol)
if model_update:
self.db.volume_update(ctxt, vol['id'], model_update)
except Exception:
LOG.exception(_LE("Get replication status for volume failed."),
resource=vol)
def create_consistencygroup(self, context, group):
"""Creates the consistency group."""
context = context.elevated()
status = fields.ConsistencyGroupStatus.AVAILABLE
model_update = None
self._notify_about_consistencygroup_usage(
context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %s: creating"), group.name)
model_update = self.driver.create_consistencygroup(context,
group)
if model_update:
if (model_update['status'] ==
fields.ConsistencyGroupStatus.ERROR):
msg = (_('Create consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.ConsistencyGroupStatus.ERROR
group.save()
LOG.error(_LE("Consistency group %s: create failed"),
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info(_LI("Consistency group %s: created successfully"),
group.name)
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
return group
def create_consistencygroup_from_src(self, context, group,
cgsnapshot=None, source_cg=None):
"""Creates the consistency group from source.
The source can be a CG snapshot or a source CG.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = self.db.volume_get_all_by_group(context, group.id)
if cgsnapshot:
try:
# Check if cgsnapshot still exists
cgsnapshot = objects.CGSnapshot.get_by_id(
context, cgsnapshot.id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Create consistency group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': cgsnapshot.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("snapshot-%s") % cgsnapshot.id
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
if source_cg:
try:
source_cg = objects.ConsistencyGroup.get_by_id(
context, source_cg.id)
except exception.ConsistencyGroupNotFound:
LOG.error(_LE("Create consistency group "
"from source cg-%(cg)s failed: "
"ConsistencyGroupNotFound."),
{'cg': source_cg.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("cg-%s") % source_cg.id
source_vols = self.db.volume_get_all_by_group(
context, source_cg.id)
for source_vol in source_vols:
if (source_vol['status'] not in
VALID_CREATE_CG_SRC_CG_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol['id'],
'valid': VALID_CREATE_CG_SRC_CG_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if cgsnapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_cg and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_consistencygroup_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create consistency group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
return group
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error(_LE("Source volumes cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
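# Illustrative sketch (hypothetical helper, not part of the original module):
# the two sort helpers above pair each target volume with its source by
# 'snapshot_id' or 'source_volid', so the driver receives sources in the same
# order as the target volumes. A stdlib-only demonstration with plain dicts:
def _demo_sort_sources(volumes, sources, key):
    # key is 'snapshot_id' for cgsnapshot sources, 'source_volid' for CG sources
    by_id = {src['id']: src for src in sources}
    return [by_id[vol[key]] for vol in volumes if vol.get(key) in by_id]

_vols = [{'id': 'v1', 'snapshot_id': 's2'}, {'id': 'v2', 'snapshot_id': 's1'}]
_snaps = [{'id': 's1'}, {'id': 's2'}]
assert _demo_sort_sources(_vols, _snaps, 'snapshot_id') == [{'id': 's2'}, {'id': 's1'}]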
def _update_volume_from_src(self, context, vol, update, group=None):
try:
snapshot_id = vol.get('snapshot_id')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
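# Illustrative sketch (not part of the original module): the per-pool stats
# used above form a plain nested dict keyed by pool name, accumulating
# 'allocated_capacity_gb'. A minimal stdlib-only version of the bookkeeping:
def _demo_add_allocated(stats, pool, size_gb):
    pools = stats.setdefault('pools', {})
    pool_stat = pools.setdefault(pool, {'allocated_capacity_gb': 0})
    pool_stat['allocated_capacity_gb'] += size_gb
    return stats

_stats = {}
_demo_add_allocated(_stats, 'backend1#pool_a', 10)
_demo_add_allocated(_stats, 'backend1#pool_a', 5)
assert _stats['pools']['backend1#pool_a']['allocated_capacity_gb'] == 15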
def delete_consistencygroup(self, context, group):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
project_id = group.project_id
if context.project_id != group.project_id:
project_id = group.project_id
else:
project_id = context.project_id
volumes = self.db.volume_get_all_by_group(context, group.id)
for volume_ref in volumes:
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_ref['id'])
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
if volume_ref['host']:
new_host = vol_utils.extract_host(volume_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node"))
self._notify_about_consistencygroup_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, group, volumes))
if volumes_model_update:
for volume in volumes_model_update:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
for volume_ref in volumes:
# Get reservations for volume
try:
volume_id = volume_ref['id']
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume_ref['size']
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
group.destroy()
self._notify_about_consistencygroup_usage(
context, group, "delete.end", volumes)
self.publish_service_capabilities(context)
LOG.info(_LI("Delete consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': add_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'status': add_vol_ref['status'],
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(add_vol_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': remove_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref['id'],
'group_id': group.id,
'status': remove_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in (
[fields.ConsistencyGroupStatus.ERROR]):
msg = (_('Error occurred when updating consistency group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
group.status = 'available'
group.updated_at = now
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group.id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
self._notify_about_consistencygroup_usage(
context, group, "update.end")
LOG.info(_LI("Update consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
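# Illustrative sketch (not from the original module): update_consistencygroup
# receives add_volumes/remove_volumes as comma-separated UUID strings and
# splits them before looking each volume up, for example:
_add_volumes = "1a2b3c,4d5e6f"
_add_volumes_list = _add_volumes.split(',') if _add_volumes else []
assert _add_volumes_list == ['1a2b3c', '4d5e6f']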
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
self.db.snapshot_update(context,
snap_model['id'],
snap_model)
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
# TODO(thangp): Switch over to use snapshot.update()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_update(
context, snapshot_id, {
'status': fields.SnapshotStatus.ERROR})
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'],
{'status': fields.SnapshotStatus.AVAILABLE,
'progress': '100%'})
cgsnapshot.status = 'available'
cgsnapshot.save()
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.end")
return cgsnapshot
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
project_id = cgsnapshot.project_id
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap.status = snap_model['status']
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
# TODO(thangp): Switch over to use snapshot.destroy()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
cgsnapshot.destroy()
LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id)
self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
"""Finalize migration process on backend device."""
model_update = None
model_update_default = {'_name_id': new_volume.name_id,
'provider_location':
new_volume.provider_location}
try:
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume,
volume_status)
except NotImplementedError:
# If update_migrated_volume is not implemented for the driver,
# _name_id and provider_location will be set with the values
# from new_volume.
model_update = model_update_default
if model_update:
model_update_default.update(model_update)
# Swap keys that were changed in the source so we keep their values
# in the temporary volume's DB record.
# Need to convert 'metadata' and 'admin_metadata' since
# they are not keys of volume, their corresponding keys are
# 'volume_metadata' and 'volume_admin_metadata'.
model_update_new = dict()
for key in model_update:
if key == 'metadata':
if volume.get('volume_metadata'):
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_metadata}
elif key == 'admin_metadata':
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_admin_metadata}
else:
model_update_new[key] = volume[key]
with new_volume.obj_as_admin():
new_volume.update(model_update_new)
new_volume.save()
with volume.obj_as_admin():
volume.update(model_update_default)
volume.save()
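# Illustrative sketch (hypothetical helper, not part of the original module):
# the key swap above turns the list-of-items form stored on the volume object
# ('volume_metadata' / 'volume_admin_metadata') back into the plain key/value
# dicts expected under the 'metadata' / 'admin_metadata' update keys:
def _demo_items_to_dict(items):
    return {item['key']: item['value'] for item in items}

_volume_metadata = [{'key': 'purpose', 'value': 'migration-demo'}]
assert _demo_items_to_dict(_volume_metadata) == {'purpose': 'migration-demo'}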
# Replication V2.1 methods
def failover_host(self, context,
secondary_backend_id=None):
"""Failover a backend to a secondary replication target.
Instructs a replication capable/configured backend to failover
to one of its secondary replication targets. host=None is
an acceptable input, and leaves it to the driver to failover
to the only configured target, or to choose a target on its
own. All of the host's volumes will be passed on to the driver
in order for it to determine the replicated volumes on the host,
if needed.
:param context: security context
:param secondary_backend_id: Specifies backend_id to fail over to
"""
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
volumes = objects.VolumeList.get_all_by_host(context, self.host)
exception_encountered = False
try:
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
(active_backend_id, volume_update_list) = (
self.driver.failover_host(
context,
volumes,
secondary_id=secondary_backend_id))
except exception.UnableToFailOver:
LOG.exception(_LE("Failed to perform replication failover"))
service.replication_status = (
fields.ReplicationStatus.FAILOVER_ERROR)
service.save()
exception_encountered = True
except exception.InvalidReplicationTarget:
LOG.exception(_LE("Invalid replication target specified "
"for failover"))
# Preserve the replication_status
if secondary_backend_id == "default":
service.replication_status = (
fields.ReplicationStatus.FAILED_OVER)
else:
service.replication_status = fields.ReplicationStatus.ENABLED
service.save()
exception_encountered = True
except exception.VolumeDriverException:
# NOTE(jdg): Drivers need to be aware if they fail during
# a failover sequence, we're expecting them to cleanup
# and make sure the driver state is such that the original
# backend is still set as primary as per driver memory
LOG.error(_LE("Driver reported error during "
"replication failover."))
service.status = 'error'
service.save()
exception_encountered = True
if exception_encountered:
LOG.error(
_LE("Error encountered during failover on host: "
"%(host)s invalid target ID %(backend_id)s"),
{'host': self.host, 'backend_id':
secondary_backend_id})
return
if secondary_backend_id == "default":
service.replication_status = fields.ReplicationStatus.ENABLED
service.active_backend_id = ""
if service.frozen:
service.disabled = True
service.disabled_reason = "frozen"
else:
service.disabled = False
service.disabled_reason = ""
service.save()
else:
service.replication_status = fields.ReplicationStatus.FAILED_OVER
service.active_backend_id = active_backend_id
service.disabled = True
service.disabled_reason = "failed-over"
service.save()
for update in volume_update_list:
# Response must include an id key: {volume_id: <cinder-uuid>}
if not update.get('volume_id'):
raise exception.UnableToFailOver(
reason=_("Update list, doesn't include volume_id"))
# Key things to consider (attaching failed-over volumes):
# provider_location
# provider_auth
# provider_id
# replication_status
vobj = objects.Volume.get_by_id(context, update['volume_id'])
vobj.update(update.get('updates', {}))
vobj.save()
LOG.info(_LI("Failed over to replication target successfully."))
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really
# need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning(_LW('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.'))
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info(_LI("Set backend status to frozen successfully."))
return True
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters, if this call
# to the backend fails, we're stuck and can't re-enable
LOG.error(_LE('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.'))
return False
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info(_LI("Thawed backend successfully."))
return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
msg = _LE("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.")
LOG.exception(msg, {'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
def get_backup_device(self, ctxt, backup):
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
return backup_device_dict
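# Illustrative sketch (not from the original module): get_backup_device()
# returns a plain dict with three keys; a consumer can unpack it like this
# (values here are made-up placeholders):
_backup_device_dict = {'backup_device': object(),
                       'secure_enabled': False,
                       'is_snapshot': False}
_device = _backup_device_dict['backup_device']
_use_secure_ops = _backup_device_dict['secure_enabled']
_from_snapshot = _backup_device_dict['is_snapshot']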
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
|
freeze_host
|
Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and deleting persistent storage volumes.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder import compute
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
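# Illustrative sketch (not part of the original module): 'extra_capabilities'
# is documented above as a JSON string of key/value pairs; the manager parses
# it with jsonutils.loads() in __init__ further down. A stdlib-only example
# with made-up keys of the kind the help text suggests (service level,
# geographical location):
import json as _demo_json
_extra_capabilities = '{"service_level": "gold", "region": "eu-west"}'
assert _demo_json.loads(_extra_capabilities)['service_level'] == 'gold'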
MAPPING = {
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver',
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver',
'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver':
'cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver',
'cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver':
'cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver',
'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver':
'cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver',
'cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver':
'cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver',
}
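# Illustrative sketch (not from the original module): MAPPING simply rewrites
# deprecated driver import paths to their new locations; __init__ below logs a
# deprecation warning and substitutes the new path before importing:
_configured = 'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver'
_effective = MAPPING.get(_configured, _configured)
assert _effective == 'cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver'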
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
def locked_detach_operation(f):
"""Lock decorator for volume detach operations.
Takes a named lock prior to executing the detach call. The lock is named
with the operation executed and the id of the volume. This lock can then
be used by other operations to avoid operation conflicts on shared volumes.
This locking mechanism is only for detach calls. We can't use the
locked_volume_operation, because detach requires an additional
attachment_id in the parameter list.
"""
def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def ldo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
return ldo_inner1
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
"""
def lso_inner1(inst, context, snapshot, **kwargs):
@utils.synchronized("%s-%s" % (snapshot.id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot, **kwargs)
return lso_inner1
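# Illustrative sketch (not part of the original module): all three decorators
# above derive the external lock name as "<resource id>-<function name>".
# create_volume() further down takes the same "<source volid>-delete_volume"
# lock explicitly, which is what serializes "delete VolA" against
# "create VolB from VolA" as described in the docstrings:
def _demo_lock_name(resource_id, func_name):
    return "%s-%s" % (resource_id, func_name)

assert _demo_lock_name('vol-a', 'delete_volume') == 'vol-a-delete_volume'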
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '2.0'
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment'}
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
# if it's not using the multi backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
# We pass the current setting for service.active_backend_id to
# the driver on init, in case there was a restart or something
curr_active_backend_id = None
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
# NOTE(jdg): This is to solve problems with unit tests
LOG.info(_LI("Service not found for updating "
"active_backend_id, assuming default "
"for driver init."))
else:
curr_active_backend_id = service.active_backend_id
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning(_LW("Suppressing requests library SSL Warnings"))
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id)
self.message_api = message_api.API()
if CONF.profiler.enabled and profiler is not None:
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
if self.driver.configuration.safe_get(
'image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get(
'image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get(
'image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(
self.db,
cinder_volume.API(),
max_cache_size,
max_cache_entries
)
LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
{'host': self.host})
else:
LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
{'host': self.host})
self.image_volume_cache = None
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pool is introduced, ask
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception(_LE('Fetch volume pool name failed.'),
resource=volume)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
# Otherwise, put them into a special fixed pool with
# volume_backend_name being the pool name, if
# volume_backend_name is None, use default pool name.
# This is only for counting purpose, doesn't update DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
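# Illustrative sketch (simplified stand-in, not the real vol_utils helpers):
# volume host strings follow the 'host@backend#pool' convention relied on
# above; a minimal parser for the pool part could look like this:
def _demo_pool_from_host(host):
    return host.split('#', 1)[1] if host and '#' in host else None

assert _demo_pool_from_host('node1@lvm#pool_a') == 'pool_a'
assert _demo_pool_from_host('node1@lvm') is None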
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
# NOTE(jdg): For now this just updates provider_id; we can add more
# items to the update if they're relevant, but we need to be safe in
# what we allow, and add a list of allowed keys. Things that make
# sense are provider_*, replication_status etc.
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
# NOTE(jdg): Make sure the returned item is in this host's volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']][0])
if update:
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
# NOTE(jdg): snapshots are slightly harder, because
# we do not have a host column and of course no get
# all by host, so we use a get_all and bounce our
# response off of it
if snapshot_updates:
cinder_snaps = self.db.snapshot_get_all(ctxt)
for snap in cinder_snaps:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
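# Illustrative sketch (not from the original module): the provider-info sync
# above matches driver-returned update dicts to local records by 'id'. A
# stdlib-only version of that matching, using next() with a default instead
# of indexing the filtered list:
_updates = [{'id': 'vol-1', 'provider_id': 'prov-42'}]
_volume = {'id': 'vol-1'}
_match = next((u for u in _updates if u['id'] == _volume['id']), None)
assert _match and _match['provider_id'] == 'prov-42'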
def init_host(self):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
# Initialize backend capabilities list
self.driver.init_capabilities()
volumes = objects.VolumeList.get_all_by_host(ctxt, self.host)
snapshots = self.db.snapshot_get_by_host(ctxt, self.host)
self._sync_provider_info(ctxt, volumes, snapshots)
# FIXME volume count for exporting is wrong
try:
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
for volume in volumes:
# available volume should also be counted into allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception:
LOG.exception(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
volume.status = 'error'
volume.save()
elif volume['status'] in ('downloading', 'creating'):
LOG.warning(_LW("Detected volume stuck "
"in %(curr_status)s "
"status, setting to ERROR."),
{'curr_status': volume['status']},
resource=volume)
if volume['status'] == 'downloading':
self.driver.clear_download(ctxt, volume)
volume.status = 'error'
volume.save()
elif volume.status == 'uploading':
# Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment(
ctxt, volume.id)
else:
pass
snapshots = objects.SnapshotList.get_by_host(
ctxt, self.host, {'status': fields.SnapshotStatus.CREATING})
for snapshot in snapshots:
LOG.warning(_LW("Detected snapshot stuck in creating "
"status, setting to ERROR."), resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
resource=volume)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
# NOTE(jdg): Careful though because that doesn't mean
# that an entry exists in the service table
self.driver.set_initialized()
for volume in volumes:
if volume['status'] == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
volume['id'], volume=volume)
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume['id'], volume=volume)
LOG.info(_LI("Resume volume delete completed successfully."),
resource=volume)
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
LOG.info(_LI("Driver initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def init_host_with_rpc(self):
LOG.info(_LI("Initializing RPC dependent components of volume "
"driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
stats = self.driver.get_volume_stats(refresh=True)
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Service not found for updating "
"replication_status."))
if service.replication_status != (
fields.ReplicationStatus.FAILED_OVER):
if stats and stats.get('replication_enabled', False):
service.replication_status = fields.ReplicationStatus.ENABLED
else:
service.replication_status = fields.ReplicationStatus.DISABLED
service.save()
LOG.info(_LI("Driver post RPC initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def is_working(self):
"""Return if Manager is ready to accept requests.
This is to inform Service class that in case of volume driver
initialization failure the manager is actually down and not ready to
accept any requests.
"""
return self.driver.initialized
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
volume=None):
"""Creates the volume."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
if request_spec is None:
request_spec = {}
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume.id,
allow_reschedule,
context,
request_spec,
filter_properties,
image_volume_cache=self.image_volume_cache,
)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
source_replicaid = request_spec.get('source_replicaid')
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
def _run_flow():
# This code executes create volume flow. If something goes wrong,
# flow reverts all job that was done and reraises an exception.
# Otherwise, all data that was generated by flow becomes available
# in flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
@utils.synchronized(locked_action, external=True)
def _run_flow_locked():
_run_flow()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
vol_ref = None
try:
if locked_action is None:
_run_flow()
else:
_run_flow_locked()
finally:
try:
vol_ref = flow_engine.storage.fetch('volume_ref')
except tfe.NotFound:
# If there's no vol_ref, then flow is reverted. Lets check out
# if rescheduling occurred.
try:
rescheduled = flow_engine.storage.get_revert_result(
create_volume.OnFailureRescheduleTask.make_name(
[create_volume.ACTION]))
except tfe.NotFound:
pass
if not rescheduled:
if not vol_ref:
# Flow was reverted and not rescheduled, fetching
# volume_ref from the DB, because it will be needed.
vol_ref = objects.Volume.get_by_id(context, volume.id)
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(vol_ref)
LOG.info(_LI("Created volume successfully."), resource=vol_ref)
return vol_ref.id
@locked_volume_operation
def delete_volume(self, context, volume_id,
unmanage_only=False,
volume=None,
cascade=False):
"""Deletes and unexports volume.
1. Delete a volume(normal case)
Delete a volume and update quotas.
2. Delete a migration volume
If deleting the volume in a migration, we want to skip
quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
volume = objects.Volume.get_by_id(context, volume_id)
else:
volume.refresh()
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.debug("Attempted delete of non-existent volume: %s", volume_id)
return
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if vol_utils.extract_host(volume.host) != self.host:
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
if unmanage_only and cascade:
# This could be done, but is ruled out for now just
# for simplicity.
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
is_migrating = volume.migration_status not in (None, 'error',
'success')
is_migrating_dest = (is_migrating and
volume.migration_status.startswith(
'target:'))
self._notify_about_volume_usage(context, volume, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != 'deleting':
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
LOG.error(_LE("Unable to delete busy volume."),
resource=volume)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'available')
return
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume."),
resource=volume)
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
volume.destroy()
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
self._notify_about_volume_usage(context, volume, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
size = volume.size
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
LOG.info(_LI("Deleted volume successfully."), resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
def create_snapshot(self, context, volume_id, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save()
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
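            # Bootable volumes carry Glance image metadata; copy it to the
            # snapshot so that volumes later created from this snapshot stay
            # bootable.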
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, volume_id)
except exception.GlanceMetadataNotFound:
                # If the volume was not created from an image, no glance
                # metadata is available for it in the volume glance metadata
                # table.
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating snapshot"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id}, resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info(_LI("Create snapshot completed successfully"),
resource=snapshot)
return snapshot.id
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot, unmanage_only=False):
"""Deletes and unexports snapshot."""
context = context.elevated()
snapshot._context = context
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
if unmanage_only:
self.driver.unmanage_snapshot(snapshot)
else:
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
resource=snapshot)
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
return
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Update snapshot usages failed."),
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy()
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
LOG.info(_LI("Delete snapshot completed successfully"),
resource=snapshot)
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached."""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
raise exception.InvalidVolume(
reason=_("volume is already attached"))
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachments = \
self.db.volume_attachment_get_all_by_instance_uuid(
context, volume_id, instance_uuid)
else:
attachments = (
self.db.volume_attachment_get_all_by_host(
context,
volume_id,
host_name_sanitized))
if attachments:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
return
self._notify_about_volume_usage(context, volume,
"attach.start")
values = {'volume_id': volume_id,
'attach_status': 'attaching', }
attachment = self.db.volume_attach(context.elevated(), values)
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(), volume_id,
{"attached_mode": mode}, False)
attachment_id = attachment['id']
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_attachment_update(context, attachment_id,
{'attach_status':
'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
volume = self.db.volume_get(context, volume_id)
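            # A volume flagged read-only in its admin metadata may only be
            # attached in 'ro' mode; otherwise record the error, notify the
            # user via the message API and bail out.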
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
self.message_api.create(
context, defined_messages.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Attaching volume %(volume_id)s to instance '
'%(instance)s at mountpoint %(mount)s on host '
'%(host)s.',
{'volume_id': volume_id, 'instance': instance_uuid,
'mount': mountpoint, 'host': host_name_sanitized},
resource=volume)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_id,
{'attach_status': 'error_attaching'})
volume = self.db.volume_attached(context.elevated(),
attachment_id,
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return self.db.volume_attachment_get(context, attachment_id)
return do_attach()
@locked_detach_operation
def detach_volume(self, context, volume_id, attachment_id=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
volume = self.db.volume_get(context, volume_id)
attachment = None
if attachment_id:
try:
attachment = self.db.volume_attachment_get(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
                # We need to make sure the volume ends up in the correct
                # status. It could be 'detaching' right now, and we don't
                # want to leave it there.
self.db.volume_detached(context, volume_id, attachment_id)
return
else:
            # Degrade gracefully by detaching a volume without an
            # attachment_id when the volume only has one attachment. This is
            # for backwards compatibility.
attachments = self.db.volume_attachment_get_all_by_volume_id(
context, volume_id)
if len(attachments) > 1:
# There are more than 1 attachments for this volume
# we have to have an attachment id.
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
# there aren't any attachments for this volume.
# so set the status to available and move on.
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
self.db.volume_update(context, volume_id,
{'status': 'available',
'attach_status': 'detached'})
return
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Detaching volume %(volume_id)s from instance '
'%(instance)s.',
{'volume_id': volume_id,
'instance': attachment.get('instance_uuid')},
resource=volume)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
        # NOTE(jdg): We used to do an ensure export here to catch upgrades
        # while volumes were attached (E->F); that was necessary to convert
        # in-use volumes from int IDs to UUIDs. It is no longer needed, so we
        # simply remove the export here (delete the iscsi target).
volume = self.db.volume_get(context, volume_id)
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Detach volume failed, due to "
"uninitialized driver."),
resource=volume)
except Exception as ex:
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
"""Create a new image-volume and cache entry for it.
This assumes that the image has already been downloaded and stored
in the volume described by the volume_ref.
"""
image_volume = None
try:
if not self.image_volume_cache.ensure_space(
ctx,
volume_ref['size'],
volume_ref['host']):
LOG.warning(_LW('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on host %(host)s.'),
{'image': image_id, 'host': volume_ref['host']})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning(_LW('Unable to clone image_volume for image '
'%(image_id)s will not create cache entry.'),
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning(_LW('Failed to create new image-volume cache entry.'
' Error: %(exception)s'), {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume.id)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) -
self._VOLUME_CLONE_SKIP_PROPERTIES}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = 'detached'
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
            LOG.exception(_LE('Create clone_image_volume: %(volume_id)s '
'for image %(image_id)s, '
'failed (Exception: %(except)s)'),
{'volume_id': volume.id,
'image_id': image_meta['id'],
'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume.id,
allow_reschedule=False, volume=image_volume)
image_volume = self.db.volume_get(ctx, image_volume.id)
if image_volume.status != 'available':
raise exception.InvalidVolume(_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.'),
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume.id)
except exception.CinderException:
LOG.exception(_LE('Could not delete the image volume %(id)s.'),
{'id': volume.id})
return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
image_meta):
"""Create a cloned volume and register its location to the image."""
if (image_meta['disk_format'] != 'raw' or
image_meta['container_format'] != 'bare'):
return False
image_volume_context = ctx
if self.driver.configuration.image_upload_use_internal_tenant:
internal_ctx = context.get_internal_tenant_context()
if internal_ctx:
image_volume_context = internal_ctx
image_volume = self._clone_image_volume(image_volume_context,
volume,
image_meta)
if not image_volume:
return False
uri = 'cinder://%s' % image_volume.id
image_registered = None
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception(_LE('Failed to register image volume location '
'%(uri)s.'), {'uri': uri})
if not image_registered:
LOG.warning(_LW('Registration of image volume URI %(uri)s '
'to image %(image_id)s failed.'),
{'uri': uri, 'image_id': image_meta['id']})
try:
self.delete_volume(image_volume_context, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete failed image volume '
'%(id)s.'), {'id': image_volume.id})
return False
image_volume_meta = {'glance_image_id': image_meta['id'],
'image_owner': ctx.project_id}
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
return True
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = self.db.volume_get(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
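            # Two upload paths: either register a cloned volume's location
            # directly with Glance (cinder backend upload) or fall back to the
            # driver copying the volume's data into the image.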
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error(_LE("Upload volume to image encountered an error "
"(image-id: %(image_id)s)."),
{'image_id': image_meta['id']},
resource=volume)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
if isinstance(error, exception.ImageLimitExceeded):
self.message_api.create(
context, defined_messages.IMAGE_FROM_VOLUME_OVER_QUOTA,
context.project_id,
resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info(_LI("Copy volume to image completed successfully."),
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
            if image_status in ('queued', 'saving'):
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
        The driver is responsible for doing any necessary security setup and
        returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume = self.db.volume_get(context, volume_id)
model_update = None
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume, connector)
except exception.CinderException:
err_msg = (_("Create export for volume failed."))
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume = self.db.volume_update(context,
volume_id,
model_update)
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
# Add qos_specs to connection info
typeid = volume['volume_type_id']
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
            # Only pass qos_specs that are designated to be consumed by
            # the front-end, or by both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.get('encryption_key_id'))
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume_ref)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Remove volume export completed successfully."),
resource=volume_ref)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
        # NOTE(jdg): Some drivers tie provider info (CHAP) to the tenant;
        # for those that do, allow them to return updated model info.
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Update volume model for "
"transfer operation failed."),
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume_ref)
return model_update
def _connect_device(self, conn):
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
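        # Verify we can actually read from the attached device before using
        # it for a data copy; otherwise raise DeviceUnavailable with the most
        # useful path information we have.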
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _attach_volume(self, ctxt, volume, properties, remote=False):
status = volume['status']
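        # remote=True routes the call through the volume RPC API because the
        # volume is managed by another backend host; otherwise we can call our
        # own initialize_connection directly.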
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
try:
conn = rpcapi.initialize_connection(ctxt, volume, properties)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume %(vol)s."),
{'vol': volume['id']})
self.db.volume_update(ctxt, volume['id'],
{'status': status})
else:
conn = self.initialize_connection(ctxt, volume['id'], properties)
return self._connect_device(conn)
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False):
connector = attach_info['connector']
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to terminate volume connection: '
'%(err)s.') % {'err': err})
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
{'src': src_vol['name'], 'dest': dest_vol['name']})
properties = utils.brick_get_connector_properties()
dest_remote = remote in ['dest', 'both']
dest_attach_info = self._attach_volume(ctxt, dest_vol, properties,
remote=dest_remote)
try:
src_remote = remote in ['src', 'both']
src_attach_info = self._attach_volume(ctxt, src_vol, properties,
remote=src_remote)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach source volume for copy."))
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, remote=dest_remote)
# Check the backend capabilities of migration destination host.
rpcapi = volume_rpcapi.VolumeAPI()
capabilities = rpcapi.get_capabilities(ctxt, dest_vol['host'],
False)
sparse_copy_volume = bool(capabilities and
capabilities.get('sparse_copy_volume',
False))
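        # Sparse copy (skipping runs of zeroes) is only used when the
        # destination backend advertises 'sparse_copy_volume'; otherwise a
        # full block-for-block copy is performed.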
copy_error = True
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
vol_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
try:
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, force=copy_error,
remote=dest_remote)
finally:
self._detach_volume(ctxt, src_attach_info, src_vol,
properties, force=copy_error,
remote=src_remote)
def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
skip = self._VOLUME_CLONE_SKIP_PROPERTIES | {'host'}
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
new_volume = objects.Volume(
context=ctxt,
host=host['host'],
status='creating',
attach_status='detached',
migration_status='target:%s' % volume['id'],
**new_vol_values
)
new_volume.create()
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None, allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
# TODO(thangp): Replace get_by_id with refresh when it is available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
tries = 0
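        # Poll the new volume until it becomes available, backing off
        # quadratically (sleep of tries**2 seconds) and giving up at the
        # configured deadline or on an 'error' status.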
while new_volume.status != 'available':
tries += 1
now = time.time()
if new_volume.status == 'error':
msg = _("failed to create new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
time.sleep(tries ** 2)
# TODO(thangp): Replace get_by_id with refresh when it is
# available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
# Copy the source volume to the destination volume
try:
attachments = volume.volume_attachment
if not attachments:
# Pre- and post-copy driver-specific actions
self.driver.before_volume_copy(ctxt, volume, new_volume,
remote='dest')
self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
self.driver.after_volume_copy(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume.id,
new_volume.id,
error=False,
volume=volume,
new_volume=new_volume)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume.id,
new_volume.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE(
"Failed to copy volume %(vol1)s to %(vol2)s"), {
'vol1': volume.id, 'vol2': new_volume.id})
self._clean_temporary_volume(ctxt, volume,
new_volume)
def _clean_temporary_volume(self, ctxt, volume, new_volume,
clean_db_only=False):
        # If we're in the migrating phase, we need to clean up the
        # destination volume because the source volume remains in place.
if volume.migration_status == 'migrating':
try:
if clean_db_only:
                    # The temporary volume has not been created; only its DB
                    # record exists.
new_volume.destroy()
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.delete_volume(ctxt, new_volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume.id})
else:
            # If we're in the completing phase, don't delete the destination
            # because we may have already deleted the source! But the
            # migration_status in the database should be cleared so the
            # volume can be handled after the migration failure.
try:
new_volume.migration_status = None
new_volume.save()
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume.id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume.id})
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False, volume=None, new_volume=None):
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None or new_volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
new_volume = objects.Volume.get_by_id(ctxt, new_volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume.id, 'vol2': new_volume.id})
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = volume.previous_status
if error:
LOG.info(_LI("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s"),
{'vol1': volume['id'], 'vol2': new_volume.id})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': 'error',
'status': orig_volume_status}
volume.update(updates)
volume.save()
return volume.id
volume.migration_status = 'completing'
volume.save()
# Detach the source volume (if it fails, don't fail the migration)
try:
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
self.detach_volume(ctxt, volume.id, attachment['id'])
except Exception as ex:
LOG.error(_LE("Detach migration source volume failed: %(err)s"),
{'err': ex}, resource=volume)
# Give driver (new_volume) a chance to update things as needed
# after a successful migration.
        # Note this needs to go through rpc to the host of the new volume;
        # the current host and driver object are for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
volume.refresh()
new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
updated_new = volume.finish_volume_migration(new_volume)
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
rpcapi.attach_volume(ctxt, volume,
attachment['instance_uuid'],
attachment['attached_host'],
attachment['mountpoint'],
'rw')
volume.update(updates)
volume.save()
        # Asynchronous deletion of the source volume in the back-end (now
        # pointed to by the target volume id).
try:
rpcapi.delete_volume(ctxt, updated_new)
except Exception as ex:
LOG.error(_LE('Failed to request async delete of migration source '
'vol %(vol)s: %(err)s'),
{'vol': volume.id, 'err': ex})
LOG.info(_LI("Complete-Migrate volume completed successfully."),
resource=volume)
return volume.id
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
new_type_id=None, volume=None):
"""Migrate the volume to the specified host (called on source host)."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
model_update = None
moved = False
status_update = None
if volume.status in ('retyping', 'maintenance'):
status_update = {'status': volume.previous_status}
volume.migration_status = 'migrating'
volume.save()
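        # Try a driver-assisted migration first; if a host copy is forced, a
        # retype is involved (new_type_id), or the driver cannot move the
        # volume itself, fall back to the generic copy-based migration below.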
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume)
moved, model_update = self.driver.migrate_volume(ctxt,
volume,
host)
if moved:
updates = {'host': host['host'],
'migration_status': 'success',
'previous_status': volume.status}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume.update(updates)
volume.save()
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
if not moved:
try:
self._migrate_volume_generic(ctxt, volume, host,
new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
LOG.info(_LI("Migrate volume completed successfully."),
resource=volume)
@periodic_task.periodic_task
def _report_driver_status(self, context):
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW("Update driver status failed: %(config_group)s "
"is uninitialized."),
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# Append filter and goodness function if needed
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume_id, new_size, reservations,
volume=None):
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.status = 'error_extending'
volume.save()
project_id = volume.project_id
size_increase = (int(new_size)) - volume.size
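        # The quota reservations for the size increase are passed in by the
        # caller; they are committed only after the driver succeeds and rolled
        # back if the extend fails.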
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except Exception:
LOG.exception(_LE("Extend volume failed."),
resource=volume)
try:
self.db.volume_update(context, volume.id,
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume.id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
QUOTAS.commit(context, reservations, project_id=project_id)
volume.update({'size': int(new_size), 'status': 'available'})
volume.save()
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info(_LI("Extend volume completed successfully."),
resource=volume)
def retype(self, ctxt, volume_id, new_type_id, host,
migration_policy='never', reservations=None,
volume=None, old_reservations=None):
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
context = ctxt.elevated()
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
status_update = {'status': volume.previous_status}
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
                # here? Setting the volume back to its original status
# for now.
volume.update(status_update)
volume.save()
# If old_reservations has been passed in from the API, we should
# skip quotas.
# TODO(ntpttr): These reservation checks are left in to be backwards
# compatible with Liberty and can be removed in N.
if not old_reservations:
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
# NOTE(wanghao): We don't need to reserve volumes and gigabytes
                # quota for the retype operation since they don't change; just
                # reserving the volume_type and per-type gigabytes is fine.
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
volume.update(status_update)
volume.save()
msg = _("Failed to update quota usage while retyping volume.")
LOG.exception(msg, resource=volume)
raise exception.CinderException(msg)
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context, volume.volume_type_id, new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
        # NOTE(jdg): Check whether the destination host is the same as the
        # current one. If it's not, don't call the driver.retype method;
        # otherwise drivers that implement retype may report success, which
        # is invalid in the case of a migrate. We assume that those that
        # support pools do this internally, so we strip off the pools
        # designation.
if (not retyped and
vol_utils.hosts_are_equivalent(self.driver.host,
host['host'])):
try:
new_type = volume_types.get_volume_type(context, new_type_id)
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
if type(ret) == tuple:
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume.id)
except Exception:
retyped = False
LOG.exception(_LE("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism."), volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume.id, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info(_LI("Retype volume completed successfully."),
resource=volume)
def manage_existing(self, ctxt, volume_id, ref=None):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume_id,
ref)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
# Update volume stats
pool = vol_utils.extract_host(vol_ref['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol_ref['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= vol_ref['size']
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol_ref['size'])
LOG.info(_LI("Manage existing volume completed successfully."),
resource=vol_ref)
return vol_ref['id']
def promote_replica(self, ctxt, volume_id):
"""Promote volume replica secondary to be the primary volume."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Promote volume replica failed."),
resource=volume)
try:
model_update = self.driver.promote_replica(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error promoting secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
LOG.info(_LI("Promote volume replica completed successfully."),
resource=volume)
def reenable_replication(self, ctxt, volume_id):
"""Re-enable replication of secondary volume with primary volumes."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Sync volume replica failed."),
resource=volume)
try:
model_update = self.driver.reenable_replication(ctxt, volume)
except exception.CinderException:
err_msg = (_("Synchronizing secondary volume to primary failed."))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def _update_replication_relationship_status(self, ctxt):
# Only want volumes that do not have a 'disabled' replication status
filters = {'replication_status': ['active', 'copying', 'error',
'active-stopped', 'inactive']}
volumes = self.db.volume_get_all_by_host(ctxt, self.host,
filters=filters)
for vol in volumes:
model_update = None
try:
model_update = self.driver.get_replication_status(
ctxt, vol)
if model_update:
self.db.volume_update(ctxt, vol['id'], model_update)
except Exception:
LOG.exception(_LE("Get replication status for volume failed."),
resource=vol)
def create_consistencygroup(self, context, group):
"""Creates the consistency group."""
context = context.elevated()
status = fields.ConsistencyGroupStatus.AVAILABLE
model_update = None
self._notify_about_consistencygroup_usage(
context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %s: creating"), group.name)
model_update = self.driver.create_consistencygroup(context,
group)
if model_update:
if (model_update['status'] ==
fields.ConsistencyGroupStatus.ERROR):
msg = (_('Create consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.ConsistencyGroupStatus.ERROR
group.save()
LOG.error(_LE("Consistency group %s: create failed"),
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info(_LI("Consistency group %s: created successfully"),
group.name)
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
return group
def create_consistencygroup_from_src(self, context, group,
cgsnapshot=None, source_cg=None):
"""Creates the consistency group from source.
The source can be a CG snapshot or a source CG.
"""
source_name = None
snapshots = None
source_vols = None
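        # The sources come either from a CG snapshot or from a source CG;
        # each target volume in the group is later matched to its source by
        # snapshot_id or source_volid respectively.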
try:
volumes = self.db.volume_get_all_by_group(context, group.id)
if cgsnapshot:
try:
# Check if cgsnapshot still exists
cgsnapshot = objects.CGSnapshot.get_by_id(
context, cgsnapshot.id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Create consistency group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': cgsnapshot.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("snapshot-%s") % cgsnapshot.id
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
if source_cg:
try:
source_cg = objects.ConsistencyGroup.get_by_id(
context, source_cg.id)
except exception.ConsistencyGroupNotFound:
LOG.error(_LE("Create consistency group "
"from source cg-%(cg)s failed: "
"ConsistencyGroupNotFound."),
{'cg': source_cg.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("cg-%s") % source_cg.id
source_vols = self.db.volume_get_all_by_group(
context, source_cg.id)
for source_vol in source_vols:
if (source_vol['status'] not in
VALID_CREATE_CG_SRC_CG_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol['id'],
'valid': VALID_CREATE_CG_SRC_CG_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if cgsnapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_cg and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_consistencygroup_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create consistency group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
return group
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error(_LE("Source volumes cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
try:
snapshot_id = vol.get('snapshot_id')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
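    # Illustrative sketch (host strings made up): extract_host() pulls the
    # pool name out of a 'host@backend#pool' string; legacy volumes without a
    # pool fall back to the backend name or the default pool name.
    #
    #   vol_utils.extract_host('node1@lvm#fast', 'pool')   # -> 'fast'
    #   vol_utils.extract_host('node1@lvm', 'pool')        # -> None
    #   vol_utils.extract_host('node1@lvm', 'pool', True)  # -> default pool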
def delete_consistencygroup(self, context, group):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
        project_id = group.project_id
volumes = self.db.volume_get_all_by_group(context, group.id)
for volume_ref in volumes:
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_ref['id'])
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
if volume_ref['host']:
new_host = vol_utils.extract_host(volume_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node"))
self._notify_about_consistencygroup_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, group, volumes))
if volumes_model_update:
for volume in volumes_model_update:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
for volume_ref in volumes:
# Get reservations for volume
try:
volume_id = volume_ref['id']
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume_ref['size']
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
group.destroy()
self._notify_about_consistencygroup_usage(
context, group, "delete.end", volumes)
self.publish_service_capabilities(context)
LOG.info(_LI("Delete consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
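    # Illustrative sketch (sizes made up): deleting a CG releases quota by
    # reserving negative deltas and committing them, e.g. for one 10 GB
    # volume in the group:
    #
    #   reserve_opts = {'volumes': -1, 'gigabytes': -10}
    #   reservations = QUOTAS.reserve(context, project_id=project_id,
    #                                 **reserve_opts)
    #   QUOTAS.commit(context, reservations, project_id=project_id)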
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': add_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'status': add_vol_ref['status'],
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(add_vol_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': remove_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref['id'],
'group_id': group.id,
'status': remove_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in (
[fields.ConsistencyGroupStatus.ERROR]):
msg = (_('Error occurred when updating consistency group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
group.status = 'available'
        group.updated_at = now
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group.id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
self._notify_about_consistencygroup_usage(
context, group, "update.end")
LOG.info(_LI("Update consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
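    # Illustrative sketch (ids made up): add_volumes/remove_volumes arrive as
    # comma-separated volume id strings and are resolved to volume refs before
    # being handed to the driver, e.g.:
    #
    #   manager.update_consistencygroup(ctxt, group,
    #                                   add_volumes='uuid-1,uuid-2',
    #                                   remove_volumes=None)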
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
self.db.snapshot_update(context,
snap_model['id'],
snap_model)
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
# TODO(thangp): Switch over to use snapshot.update()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_update(
context, snapshot_id, {
'status': fields.SnapshotStatus.ERROR})
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'],
{'status': fields.SnapshotStatus.AVAILABLE,
'progress': '100%'})
cgsnapshot.status = 'available'
cgsnapshot.save()
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.end")
return cgsnapshot
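    # Illustrative sketch (ids made up): per the NOTE(xyang) above,
    # snapshots_model_update returned by the driver is a list of plain dicts
    # keyed by snapshot id, e.g.:
    #
    #   snapshots_model_update = [
    #       {'id': 'snap-uuid-1', 'status': 'available'},
    #       {'id': 'snap-uuid-2', 'status': 'error'},
    #   ]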
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
project_id = cgsnapshot.project_id
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap.status = snap_model['status']
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
# TODO(thangp): Switch over to use snapshot.destroy()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
cgsnapshot.destroy()
LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id)
self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
"""Finalize migration process on backend device."""
model_update = None
model_update_default = {'_name_id': new_volume.name_id,
'provider_location':
new_volume.provider_location}
try:
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume,
volume_status)
except NotImplementedError:
# If update_migrated_volume is not implemented for the driver,
# _name_id and provider_location will be set with the values
# from new_volume.
model_update = model_update_default
if model_update:
model_update_default.update(model_update)
# Swap keys that were changed in the source so we keep their values
# in the temporary volume's DB record.
# Need to convert 'metadata' and 'admin_metadata' since
# they are not keys of volume, their corresponding keys are
# 'volume_metadata' and 'volume_admin_metadata'.
model_update_new = dict()
for key in model_update:
if key == 'metadata':
if volume.get('volume_metadata'):
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_metadata}
elif key == 'admin_metadata':
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_admin_metadata}
else:
model_update_new[key] = volume[key]
with new_volume.obj_as_admin():
new_volume.update(model_update_new)
new_volume.save()
with volume.obj_as_admin():
volume.update(model_update_default)
volume.save()
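    # Illustrative sketch (keys made up): if the driver reports that it
    # changed 'provider_location' and 'metadata' on the migrated volume, the
    # source volume's current values for those keys are preserved on the
    # temporary volume, with metadata flattened back into a plain dict:
    #
    #   model_update     = {'provider_location': 'new-loc', 'metadata': {...}}
    #   model_update_new = {'provider_location': volume['provider_location'],
    #                       'metadata': {m['key']: m['value']
    #                                    for m in volume.volume_metadata}}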
# Replication V2.1 methods
def failover_host(self, context,
secondary_backend_id=None):
"""Failover a backend to a secondary replication target.
        Instructs a replication capable/configured backend to fail over
        to one of its secondary replication targets. secondary_backend_id=None
        is an acceptable input, and leaves it to the driver to fail over
        to the only configured target, or to choose a target on its
        own. All of the host's volumes will be passed on to the driver
        so that it can determine the replicated volumes on the host,
        if needed.
:param context: security context
:param secondary_backend_id: Specifies backend_id to fail over to
"""
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
volumes = objects.VolumeList.get_all_by_host(context, self.host)
exception_encountered = False
try:
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
(active_backend_id, volume_update_list) = (
self.driver.failover_host(
context,
volumes,
secondary_id=secondary_backend_id))
except exception.UnableToFailOver:
LOG.exception(_LE("Failed to perform replication failover"))
service.replication_status = (
fields.ReplicationStatus.FAILOVER_ERROR)
service.save()
exception_encountered = True
except exception.InvalidReplicationTarget:
LOG.exception(_LE("Invalid replication target specified "
"for failover"))
# Preserve the replication_status
if secondary_backend_id == "default":
service.replication_status = (
fields.ReplicationStatus.FAILED_OVER)
else:
service.replication_status = fields.ReplicationStatus.ENABLED
service.save()
exception_encountered = True
except exception.VolumeDriverException:
            # NOTE(jdg): Drivers need to be aware that if they fail during
            # a failover sequence, we expect them to clean up and make sure
            # the driver state still has the original backend set as primary,
            # as far as the driver's own bookkeeping is concerned.
LOG.error(_LE("Driver reported error during "
"replication failover."))
service.status = 'error'
service.save()
exception_encountered = True
if exception_encountered:
LOG.error(
_LE("Error encountered during failover on host: "
"%(host)s invalid target ID %(backend_id)s"),
{'host': self.host, 'backend_id':
secondary_backend_id})
return
if secondary_backend_id == "default":
service.replication_status = fields.ReplicationStatus.ENABLED
service.active_backend_id = ""
if service.frozen:
service.disabled = True
service.disabled_reason = "frozen"
else:
service.disabled = False
service.disabled_reason = ""
service.save()
else:
service.replication_status = fields.ReplicationStatus.FAILED_OVER
service.active_backend_id = active_backend_id
service.disabled = True
service.disabled_reason = "failed-over"
service.save()
for update in volume_update_list:
# Response must include an id key: {volume_id: <cinder-uuid>}
if not update.get('volume_id'):
raise exception.UnableToFailOver(
reason=_("Update list, doesn't include volume_id"))
# Key things to consider (attaching failed-over volumes):
# provider_location
# provider_auth
# provider_id
# replication_status
vobj = objects.Volume.get_by_id(context, update['volume_id'])
vobj.update(update.get('updates', {}))
vobj.save()
LOG.info(_LI("Failed over to replication target successfully."))
# MASKED: freeze_host function (lines 3325-3357)
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters, if this call
# to the backend fails, we're stuck and can't re-enable
LOG.error(_LE('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.'))
return False
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info(_LI("Thawed backend successfully."))
return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
msg = _LE("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.")
LOG.exception(msg, {'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
def get_backup_device(self, ctxt, backup):
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
return backup_device_dict
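    # Illustrative sketch: the dict handed back to the backup service simply
    # bundles the driver's answers, e.g.:
    #
    #   {'backup_device': <volume or snapshot reference from the driver>,
    #    'secure_enabled': False,
    #    'is_snapshot': True}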
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
|
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really
# need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning(_LW('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.'))
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info(_LI("Set backend status to frozen successfully."))
return True
| 3,325 | 3,357 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder import compute
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
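# Illustrative sketch (section and keys made up): 'extra_capabilities' is
# parsed with jsonutils.loads() at manager start-up, so a deployment might
# set something like the following in cinder.conf:
#
#   [backend_gold]
#   extra_capabilities = {"service_level": "gold", "region": "eu-west"}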
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver',
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver',
'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver':
'cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver',
'cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver':
'cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver',
'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver':
'cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver',
'cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver':
'cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver',
}
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
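# Illustrative sketch (ids made up): the lock name combines the volume id and
# the wrapped function's name, so e.g. a delete of VolA and a create-from-VolA
# serialize on the same external lock:
#
#   @locked_volume_operation
#   def delete_volume(self, context, volume_id, ...):
#       ...
#   # lock taken: "<volume_id>-delete_volume" (external=True)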
def locked_detach_operation(f):
"""Lock decorator for volume detach operations.
Takes a named lock prior to executing the detach call. The lock is named
with the operation executed and the id of the volume. This lock can then
be used by other operations to avoid operation conflicts on shared volumes.
This locking mechanism is only for detach calls. We can't use the
locked_volume_operation, because detach requires an additional
attachment_id in the parameter list.
"""
def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def ldo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
return ldo_inner1
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
"""
def lso_inner1(inst, context, snapshot, **kwargs):
@utils.synchronized("%s-%s" % (snapshot.id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot, **kwargs)
return lso_inner1
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '2.0'
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment'}
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
            # if it's not using the multi-backend configuration
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
# We pass the current setting for service.active_backend_id to
        # the driver on init, in case there was a restart or something
curr_active_backend_id = None
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
# NOTE(jdg): This is to solve problems with unit tests
LOG.info(_LI("Service not found for updating "
"active_backend_id, assuming default "
"for driver init."))
else:
curr_active_backend_id = service.active_backend_id
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning(_LW("Suppressing requests library SSL Warnings"))
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id)
self.message_api = message_api.API()
if CONF.profiler.enabled and profiler is not None:
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
if self.driver.configuration.safe_get(
'image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get(
'image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get(
'image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(
self.db,
cinder_volume.API(),
max_cache_size,
max_cache_entries
)
LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
{'host': self.host})
else:
LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
{'host': self.host})
self.image_volume_cache = None
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pool is introduced, ask
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception(_LE('Fetch volume pool name failed.'),
resource=volume)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
# Otherwise, put them into a special fixed pool with
# volume_backend_name being the pool name, if
# volume_backend_name is None, use default pool name.
# This is only for counting purpose, doesn't update DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
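    # Illustrative sketch (numbers made up): after counting, self.stats ends
    # up looking roughly like:
    #
    #   {'allocated_capacity_gb': 150,
    #    'pools': {'fast': {'allocated_capacity_gb': 100},
    #              'slow': {'allocated_capacity_gb': 50}}}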
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
        # NOTE(jdg): For now this just updates provider_id; we can add more
        # items to the update if they're relevant, but we need to be safe in
        # what we allow and add a list of allowed keys.  Things that make
        # sense are provider_*, replication_status, etc.
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
                # NOTE(JDG): Make sure the returned item is in this host's volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']][0])
if update:
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
        # NOTE(jdg): snapshots are slightly harder, because
# we do not have a host column and of course no get
# all by host, so we use a get_all and bounce our
# response off of it
if snapshot_updates:
cinder_snaps = self.db.snapshot_get_all(ctxt)
for snap in cinder_snaps:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
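    # Illustrative sketch (ids made up): update_provider_info() is expected to
    # return per-volume and per-snapshot dicts carrying at least 'id' and
    # 'provider_id', e.g.:
    #
    #   updates          = [{'id': 'vol-uuid-1', 'provider_id': 'prov-1'}]
    #   snapshot_updates = [{'id': 'snap-uuid-1', 'provider_id': 'prov-2'}]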
def init_host(self):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
# Initialize backend capabilities list
self.driver.init_capabilities()
volumes = objects.VolumeList.get_all_by_host(ctxt, self.host)
snapshots = self.db.snapshot_get_by_host(ctxt, self.host)
self._sync_provider_info(ctxt, volumes, snapshots)
# FIXME volume count for exporting is wrong
try:
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
for volume in volumes:
# available volume should also be counted into allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception:
LOG.exception(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
volume.status = 'error'
volume.save()
elif volume['status'] in ('downloading', 'creating'):
LOG.warning(_LW("Detected volume stuck "
"in %(curr_status)s "
"status, setting to ERROR."),
{'curr_status': volume['status']},
resource=volume)
if volume['status'] == 'downloading':
self.driver.clear_download(ctxt, volume)
volume.status = 'error'
volume.save()
elif volume.status == 'uploading':
# Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment(
ctxt, volume.id)
else:
pass
snapshots = objects.SnapshotList.get_by_host(
ctxt, self.host, {'status': fields.SnapshotStatus.CREATING})
for snapshot in snapshots:
LOG.warning(_LW("Detected snapshot stuck in creating "
"status, setting to ERROR."), resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
resource=volume)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
# NOTE(jdg): Careful though because that doesn't mean
# that an entry exists in the service table
self.driver.set_initialized()
for volume in volumes:
if volume['status'] == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
volume['id'], volume=volume)
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume['id'], volume=volume)
LOG.info(_LI("Resume volume delete completed successfully."),
resource=volume)
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
LOG.info(_LI("Driver initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def init_host_with_rpc(self):
LOG.info(_LI("Initializing RPC dependent components of volume "
"driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
stats = self.driver.get_volume_stats(refresh=True)
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Service not found for updating "
"replication_status."))
if service.replication_status != (
fields.ReplicationStatus.FAILED_OVER):
if stats and stats.get('replication_enabled', False):
service.replication_status = fields.ReplicationStatus.ENABLED
else:
service.replication_status = fields.ReplicationStatus.DISABLED
service.save()
LOG.info(_LI("Driver post RPC initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def is_working(self):
"""Return if Manager is ready to accept requests.
This is to inform Service class that in case of volume driver
initialization failure the manager is actually down and not ready to
accept any requests.
"""
return self.driver.initialized
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
volume=None):
"""Creates the volume."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
if request_spec is None:
request_spec = {}
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume.id,
allow_reschedule,
context,
request_spec,
filter_properties,
image_volume_cache=self.image_volume_cache,
)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
source_replicaid = request_spec.get('source_replicaid')
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
def _run_flow():
            # This code executes the create volume flow. If something goes
            # wrong, the flow reverts all the work that was done and reraises
            # an exception. Otherwise, all data generated by the flow becomes
            # available in the flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
@utils.synchronized(locked_action, external=True)
def _run_flow_locked():
_run_flow()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
vol_ref = None
try:
if locked_action is None:
_run_flow()
else:
_run_flow_locked()
finally:
try:
vol_ref = flow_engine.storage.fetch('volume_ref')
except tfe.NotFound:
                # If there's no vol_ref, then the flow was reverted. Let's
                # check whether rescheduling occurred.
try:
rescheduled = flow_engine.storage.get_revert_result(
create_volume.OnFailureRescheduleTask.make_name(
[create_volume.ACTION]))
except tfe.NotFound:
pass
if not rescheduled:
if not vol_ref:
# Flow was reverted and not rescheduled, fetching
# volume_ref from the DB, because it will be needed.
vol_ref = objects.Volume.get_by_id(context, volume.id)
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(vol_ref)
LOG.info(_LI("Created volume successfully."), resource=vol_ref)
return vol_ref.id
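    # Illustrative sketch (ids made up): the locked_action name mirrors the
    # lock taken by the corresponding delete operation, so creating a volume
    # from SnapA blocks delete_snapshot(SnapA) until the flow finishes:
    #
    #   locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
    #   # same name that @locked_snapshot_operation builds for delete_snapshot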
@locked_volume_operation
def delete_volume(self, context, volume_id,
unmanage_only=False,
volume=None,
cascade=False):
"""Deletes and unexports volume.
        1. Delete a volume (normal case)
Delete a volume and update quotas.
2. Delete a migration volume
If deleting the volume in a migration, we want to skip
quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
volume = objects.Volume.get_by_id(context, volume_id)
else:
volume.refresh()
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.debug("Attempted delete of non-existent volume: %s", volume_id)
return
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if vol_utils.extract_host(volume.host) != self.host:
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
if unmanage_only and cascade:
# This could be done, but is ruled out for now just
# for simplicity.
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
is_migrating = volume.migration_status not in (None, 'error',
'success')
is_migrating_dest = (is_migrating and
volume.migration_status.startswith(
'target:'))
self._notify_about_volume_usage(context, volume, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != 'deleting':
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
LOG.error(_LE("Unable to delete busy volume."),
resource=volume)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'available')
return
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume."),
resource=volume)
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
volume.destroy()
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
self._notify_about_volume_usage(context, volume, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
size = volume.size
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
LOG.info(_LI("Deleted volume successfully."), resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
def create_snapshot(self, context, volume_id, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save()
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, volume_id)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating snapshot"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id}, resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info(_LI("Create snapshot completed successfully"),
resource=snapshot)
return snapshot.id
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot, unmanage_only=False):
"""Deletes and unexports snapshot."""
context = context.elevated()
snapshot._context = context
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
if unmanage_only:
self.driver.unmanage_snapshot(snapshot)
else:
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
resource=snapshot)
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
return
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Update snapshot usages failed."),
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy()
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
LOG.info(_LI("Delete snapshot completed successfully"),
resource=snapshot)
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached."""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
raise exception.InvalidVolume(
reason=_("volume is already attached"))
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachments = \
self.db.volume_attachment_get_all_by_instance_uuid(
context, volume_id, instance_uuid)
else:
attachments = (
self.db.volume_attachment_get_all_by_host(
context,
volume_id,
host_name_sanitized))
if attachments:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
return
self._notify_about_volume_usage(context, volume,
"attach.start")
values = {'volume_id': volume_id,
'attach_status': 'attaching', }
attachment = self.db.volume_attach(context.elevated(), values)
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(), volume_id,
{"attached_mode": mode}, False)
attachment_id = attachment['id']
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_attachment_update(context, attachment_id,
{'attach_status':
'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
self.message_api.create(
context, defined_messages.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Attaching volume %(volume_id)s to instance '
'%(instance)s at mountpoint %(mount)s on host '
'%(host)s.',
{'volume_id': volume_id, 'instance': instance_uuid,
'mount': mountpoint, 'host': host_name_sanitized},
resource=volume)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_id,
{'attach_status': 'error_attaching'})
volume = self.db.volume_attached(context.elevated(),
attachment_id,
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return self.db.volume_attachment_get(context, attachment_id)
return do_attach()
@locked_detach_operation
def detach_volume(self, context, volume_id, attachment_id=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
volume = self.db.volume_get(context, volume_id)
attachment = None
if attachment_id:
try:
attachment = self.db.volume_attachment_get(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
# We need to make sure the volume status is set to the correct
# status. It could be in detaching status now, and we don't
# want to leave it there.
self.db.volume_detached(context, volume_id, attachment_id)
return
else:
            # We can try to degrade gracefully here by detaching the volume
            # without the attachment_id if the volume only has one attachment.
            # This is for backwards compatibility.
attachments = self.db.volume_attachment_get_all_by_volume_id(
context, volume_id)
if len(attachments) > 1:
# There is more than one attachment for this volume,
# so an attachment_id is required.
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
# There aren't any attachments for this volume,
# so set the status to available and move on.
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
self.db.volume_update(context, volume_id,
{'status': 'available',
'attach_status': 'detached'})
return
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Detaching volume %(volume_id)s from instance '
'%(instance)s.',
{'volume_id': volume_id,
'instance': attachment.get('instance_uuid')},
resource=volume)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
# NOTE(jdg): We used to do an ensure_export here to
# catch upgrades while volumes were attached (E->F);
# this was necessary to convert in-use volumes from
# int IDs to UUIDs. That is no longer needed, so we
# simply remove the export here (delete the iscsi
# target).
volume = self.db.volume_get(context, volume_id)
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Detach volume failed, due to "
"uninitialized driver."),
resource=volume)
except Exception as ex:
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
"""Create a new image-volume and cache entry for it.
This assumes that the image has already been downloaded and stored
in the volume described by the volume_ref.
"""
image_volume = None
try:
if not self.image_volume_cache.ensure_space(
ctx,
volume_ref['size'],
volume_ref['host']):
LOG.warning(_LW('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on host %(host)s.'),
{'image': image_id, 'host': volume_ref['host']})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning(_LW('Unable to clone image_volume for image '
'%(image_id)s will not create cache entry.'),
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning(_LW('Failed to create new image-volume cache entry.'
' Error: %(exception)s'), {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume.id)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) -
self._VOLUME_CLONE_SKIP_PROPERTIES}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = 'detached'
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
LOG.exception(_LE('Create clone_image_volume: %(volume_id)s '
'for image %(image_id)s '
'failed (Exception: %(except)s)'),
{'volume_id': volume.id,
'image_id': image_meta['id'],
'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume.id,
allow_reschedule=False, volume=image_volume)
image_volume = self.db.volume_get(ctx, image_volume.id)
if image_volume.status != 'available':
raise exception.InvalidVolume(reason=_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.'),
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume.id)
except exception.CinderException:
LOG.exception(_LE('Could not delete the image volume %(id)s.'),
{'id': image_volume.id})
return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
image_meta):
"""Create a cloned volume and register its location to the image."""
if (image_meta['disk_format'] != 'raw' or
image_meta['container_format'] != 'bare'):
return False
image_volume_context = ctx
if self.driver.configuration.image_upload_use_internal_tenant:
internal_ctx = context.get_internal_tenant_context()
if internal_ctx:
image_volume_context = internal_ctx
image_volume = self._clone_image_volume(image_volume_context,
volume,
image_meta)
if not image_volume:
return False
uri = 'cinder://%s' % image_volume.id
image_registered = None
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception(_LE('Failed to register image volume location '
'%(uri)s.'), {'uri': uri})
if not image_registered:
LOG.warning(_LW('Registration of image volume URI %(uri)s '
'to image %(image_id)s failed.'),
{'uri': uri, 'image_id': image_meta['id']})
try:
self.delete_volume(image_volume_context, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete failed image volume '
'%(id)s.'), {'id': image_volume.id})
return False
image_volume_meta = {'glance_image_id': image_meta['id'],
'image_owner': ctx.project_id}
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
return True
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
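For example (placeholder values only; the id comes from Glance and the
formats depend on how the image was created)::
{
'id': '<glance image uuid>',
'container_format': 'bare',
'disk_format': 'raw',
}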
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = self.db.volume_get(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error(_LE("Upload volume to image encountered an error "
"(image-id: %(image_id)s)."),
{'image_id': image_meta['id']},
resource=volume)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
if isinstance(error, exception.ImageLimitExceeded):
self.message_api.create(
context, defined_messages.IMAGE_FROM_VOLUME_OVER_QUOTA,
context.project_id,
resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info(_LI("Copy volume to image completed successfully."),
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status in ('queued', 'saving'):
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
The driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
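As an illustrative, driver-dependent example, an iSCSI driver might
return something shaped like::
{
'driver_volume_type': 'iscsi',
'data': {
'target_iqn': '<target iqn>',
'target_portal': '<ip:port>',
'target_lun': 0,
'access_mode': 'rw',
},
}
The exact keys under 'data' vary by driver and transport protocol.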
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume = self.db.volume_get(context, volume_id)
model_update = None
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume, connector)
except exception.CinderException:
err_msg = (_("Create export for volume failed."))
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume = self.db.volume_update(context,
volume_id,
model_update)
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
# Add qos_specs to connection info
typeid = volume['volume_type_id']
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# Only pass qos_specs that are designated to be consumed by the
# front-end, or by both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.get('encryption_key_id'))
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume_ref)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Remove volume export completed successfully."),
resource=volume_ref)
def accept_transfer(self, context, volume_id, new_user, new_project):
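"""Accept ownership transfer of a volume to a new user and project."""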
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
# NOTE(jdg): Some drivers tie provider info (CHAP) to the tenant;
# for those that do, allow them to return updated model info.
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Update volume model for "
"transfer operation failed."),
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume_ref)
return model_update
def _connect_device(self, conn):
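"""Connect the volume described by conn locally via os-brick.
Returns a dict with the connection info, device handle and connector.
"""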
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _attach_volume(self, ctxt, volume, properties, remote=False):
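"""Initialize a connection (via RPC if remote) and attach the volume locally."""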
status = volume['status']
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
try:
conn = rpcapi.initialize_connection(ctxt, volume, properties)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume %(vol)s."),
{'vol': volume['id']})
self.db.volume_update(ctxt, volume['id'],
{'status': status})
else:
conn = self.initialize_connection(ctxt, volume['id'], properties)
return self._connect_device(conn)
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False):
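"""Disconnect the local device, then terminate the connection and
remove the export (via RPC if the volume is managed by a remote host).
"""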
connector = attach_info['connector']
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to terminate volume connection: '
'%(err)s.'), {'err': err})
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
{'src': src_vol['name'], 'dest': dest_vol['name']})
properties = utils.brick_get_connector_properties()
dest_remote = remote in ['dest', 'both']
dest_attach_info = self._attach_volume(ctxt, dest_vol, properties,
remote=dest_remote)
try:
src_remote = remote in ['src', 'both']
src_attach_info = self._attach_volume(ctxt, src_vol, properties,
remote=src_remote)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach source volume for copy."))
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, remote=dest_remote)
# Check the backend capabilities of migration destination host.
rpcapi = volume_rpcapi.VolumeAPI()
capabilities = rpcapi.get_capabilities(ctxt, dest_vol['host'],
False)
sparse_copy_volume = bool(capabilities and
capabilities.get('sparse_copy_volume',
False))
copy_error = True
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
vol_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
try:
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, force=copy_error,
remote=dest_remote)
finally:
self._detach_volume(ctxt, src_attach_info, src_vol,
properties, force=copy_error,
remote=src_remote)
def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
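"""Host-assisted migration: create a new volume on the destination
host, copy the data across, then complete the migration (directly for
detached volumes, or via Nova for attached ones).
"""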
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
skip = self._VOLUME_CLONE_SKIP_PROPERTIES | {'host'}
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
new_volume = objects.Volume(
context=ctxt,
host=host['host'],
status='creating',
attach_status='detached',
migration_status='target:%s' % volume['id'],
**new_vol_values
)
new_volume.create()
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None, allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
# TODO(thangp): Replace get_by_id with refresh when it is available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
tries = 0
while new_volume.status != 'available':
tries += 1
now = time.time()
if new_volume.status == 'error':
msg = _("failed to create new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
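# Poll with a quadratic backoff (1s, 4s, 9s, ...) until the
# volume becomes available or the deadline expires.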
time.sleep(tries ** 2)
# TODO(thangp): Replace get_by_id with refresh when it is
# available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
# Copy the source volume to the destination volume
try:
attachments = volume.volume_attachment
if not attachments:
# Pre- and post-copy driver-specific actions
self.driver.before_volume_copy(ctxt, volume, new_volume,
remote='dest')
self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
self.driver.after_volume_copy(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume.id,
new_volume.id,
error=False,
volume=volume,
new_volume=new_volume)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume.id,
new_volume.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE(
"Failed to copy volume %(vol1)s to %(vol2)s"), {
'vol1': volume.id, 'vol2': new_volume.id})
self._clean_temporary_volume(ctxt, volume,
new_volume)
def _clean_temporary_volume(self, ctxt, volume, new_volume,
clean_db_only=False):
# If we're in the migrating phase, we need to clean up the
# destination volume because the source volume remains in place.
if volume.migration_status == 'migrating':
try:
if clean_db_only:
# The temporary volume is not created, only DB data
# is created
new_volume.destroy()
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.delete_volume(ctxt, new_volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume.id})
else:
# If we're in the completing phase, don't delete the
# destination because we may have already deleted the
# source! But the migration_status in the database should
# be cleared so the volume can be handled after the
# migration failure.
try:
new_volume.migration_status = None
new_volume.save()
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume.id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume.id})
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False, volume=None, new_volume=None):
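"""Complete a host-assisted migration: swap the source and destination
DB records and asynchronously delete the temporary backend volume.
"""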
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None or new_volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
new_volume = objects.Volume.get_by_id(ctxt, new_volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume.id, 'vol2': new_volume.id})
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = volume.previous_status
if error:
LOG.info(_LI("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s"),
{'vol1': volume['id'], 'vol2': new_volume.id})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': 'error',
'status': orig_volume_status}
volume.update(updates)
volume.save()
return volume.id
volume.migration_status = 'completing'
volume.save()
# Detach the source volume (if it fails, don't fail the migration)
try:
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
self.detach_volume(ctxt, volume.id, attachment['id'])
except Exception as ex:
LOG.error(_LE("Detach migration source volume failed: %(err)s"),
{'err': ex}, resource=volume)
# Give driver (new_volume) a chance to update things as needed
# after a successful migration.
# Note this needs to go through rpc to the host of the new volume;
# the current host and driver object are for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
volume.refresh()
new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
updated_new = volume.finish_volume_migration(new_volume)
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
rpcapi.attach_volume(ctxt, volume,
attachment['instance_uuid'],
attachment['attached_host'],
attachment['mountpoint'],
'rw')
volume.update(updates)
volume.save()
# Asynchronous deletion of the source volume in the back-end (now
# pointed by the target volume id)
try:
rpcapi.delete_volume(ctxt, updated_new)
except Exception as ex:
LOG.error(_LE('Failed to request async delete of migration source '
'vol %(vol)s: %(err)s'),
{'vol': volume.id, 'err': ex})
LOG.info(_LI("Complete-Migrate volume completed successfully."),
resource=volume)
return volume.id
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
new_type_id=None, volume=None):
"""Migrate the volume to the specified host (called on source host)."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
model_update = None
moved = False
status_update = None
if volume.status in ('retyping', 'maintenance'):
status_update = {'status': volume.previous_status}
volume.migration_status = 'migrating'
volume.save()
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume)
moved, model_update = self.driver.migrate_volume(ctxt,
volume,
host)
if moved:
updates = {'host': host['host'],
'migration_status': 'success',
'previous_status': volume.status}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume.update(updates)
volume.save()
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
if not moved:
try:
self._migrate_volume_generic(ctxt, volume, host,
new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
LOG.info(_LI("Migrate volume completed successfully."),
resource=volume)
@periodic_task.periodic_task
def _report_driver_status(self, context):
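"""Periodically collect driver stats and queue them for the schedulers."""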
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW("Update driver status failed: %(config_group)s "
"is uninitialized."),
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# Append filter and goodness function if needed
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume_id, new_size, reservations,
volume=None):
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.status = 'error_extending'
volume.save()
project_id = volume.project_id
size_increase = (int(new_size)) - volume.size
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except Exception:
LOG.exception(_LE("Extend volume failed."),
resource=volume)
try:
self.db.volume_update(context, volume.id,
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume.id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
QUOTAS.commit(context, reservations, project_id=project_id)
volume.update({'size': int(new_size), 'status': 'available'})
volume.save()
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info(_LI("Extend volume completed successfully."),
resource=volume)
def retype(self, ctxt, volume_id, new_type_id, host,
migration_policy='never', reservations=None,
volume=None, old_reservations=None):
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
context = ctxt.elevated()
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
status_update = {'status': volume.previous_status}
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
# here? Setting the volume back to its original status
# for now.
volume.update(status_update)
volume.save()
# If old_reservations has been passed in from the API, we should
# skip quotas.
# TODO(ntpttr): These reservation checks are left in to be backwards
# compatible with Liberty and can be removed in N.
if not old_reservations:
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
# NOTE(wanghao): We don't need to reserve volumes and gigabytes
# quota for the retype operation since they don't change; just
# reserving the volume_type and type gigabytes is fine.
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
volume.update(status_update)
volume.save()
msg = _("Failed to update quota usage while retyping volume.")
LOG.exception(msg, resource=volume)
raise exception.CinderException(msg)
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context, volume.volume_type_id, new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
# NOTE(jdg): Check to see if the destination host is the same
# as the current one. If it's not, don't call the driver.retype
# method; otherwise drivers that implement retype may report
# success, but it's invalid in the case of a migrate.
# We assume that those that support pools do this internally,
# so we strip off the pools designation.
if (not retyped and
vol_utils.hosts_are_equivalent(self.driver.host,
host['host'])):
try:
new_type = volume_types.get_volume_type(context, new_type_id)
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
if isinstance(ret, tuple):
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume.id)
except Exception:
retyped = False
LOG.exception(_LE("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism."), volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume.id, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info(_LI("Retype volume completed successfully."),
resource=volume)
def manage_existing(self, ctxt, volume_id, ref=None):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume_id,
ref)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
# Update volume stats
pool = vol_utils.extract_host(vol_ref['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol_ref['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= vol_ref['size']
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol_ref['size'])
LOG.info(_LI("Manage existing volume completed successfully."),
resource=vol_ref)
return vol_ref['id']
def promote_replica(self, ctxt, volume_id):
"""Promote volume replica secondary to be the primary volume."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Promote volume replica failed."),
resource=volume)
try:
model_update = self.driver.promote_replica(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error promoting secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
LOG.info(_LI("Promote volume replica completed successfully."),
resource=volume)
def reenable_replication(self, ctxt, volume_id):
"""Re-enable replication of secondary volume with primary volumes."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Sync volume replica failed."),
resource=volume)
try:
model_update = self.driver.reenable_replication(ctxt, volume)
except exception.CinderException:
err_msg = (_("Synchronizing secondary volume to primary failed."))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def _update_replication_relationship_status(self, ctxt):
# Only want volumes that do not have a 'disabled' replication status
filters = {'replication_status': ['active', 'copying', 'error',
'active-stopped', 'inactive']}
volumes = self.db.volume_get_all_by_host(ctxt, self.host,
filters=filters)
for vol in volumes:
model_update = None
try:
model_update = self.driver.get_replication_status(
ctxt, vol)
if model_update:
self.db.volume_update(ctxt, vol['id'], model_update)
except Exception:
LOG.exception(_LE("Get replication status for volume failed."),
resource=vol)
def create_consistencygroup(self, context, group):
"""Creates the consistency group."""
context = context.elevated()
status = fields.ConsistencyGroupStatus.AVAILABLE
model_update = None
self._notify_about_consistencygroup_usage(
context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %s: creating"), group.name)
model_update = self.driver.create_consistencygroup(context,
group)
if model_update:
if (model_update['status'] ==
fields.ConsistencyGroupStatus.ERROR):
msg = (_('Create consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.ConsistencyGroupStatus.ERROR
group.save()
LOG.error(_LE("Consistency group %s: create failed"),
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info(_LI("Consistency group %s: created successfully"),
group.name)
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
return group
def create_consistencygroup_from_src(self, context, group,
cgsnapshot=None, source_cg=None):
"""Creates the consistency group from source.
The source can be a CG snapshot or a source CG.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = self.db.volume_get_all_by_group(context, group.id)
if cgsnapshot:
try:
# Check if cgsnapshot still exists
cgsnapshot = objects.CGSnapshot.get_by_id(
context, cgsnapshot.id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Create consistency group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': cgsnapshot.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("snapshot-%s") % cgsnapshot.id
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
if source_cg:
try:
source_cg = objects.ConsistencyGroup.get_by_id(
context, source_cg.id)
except exception.ConsistencyGroupNotFound:
LOG.error(_LE("Create consistency group "
"from source cg-%(cg)s failed: "
"ConsistencyGroupNotFound."),
{'cg': source_cg.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("cg-%s") % source_cg.id
source_vols = self.db.volume_get_all_by_group(
context, source_cg.id)
for source_vol in source_vols:
if (source_vol['status'] not in
VALID_CREATE_CG_SRC_CG_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol['id'],
'valid': VALID_CREATE_CG_SRC_CG_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if cgsnapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_cg and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_consistencygroup_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create consistency group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
return group
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error(_LE("Source volumes cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
try:
snapshot_id = vol.get('snapshot_id')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
def delete_consistencygroup(self, context, group):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
if context.project_id != group.project_id:
project_id = group.project_id
else:
project_id = context.project_id
volumes = self.db.volume_get_all_by_group(context, group.id)
for volume_ref in volumes:
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_ref['id'])
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
if volume_ref['host']:
new_host = vol_utils.extract_host(volume_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node"))
self._notify_about_consistencygroup_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, group, volumes))
if volumes_model_update:
for volume in volumes_model_update:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
for volume_ref in volumes:
# Get reservations for volume
try:
volume_id = volume_ref['id']
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume_ref['size']
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
group.destroy()
self._notify_about_consistencygroup_usage(
context, group, "delete.end", volumes)
self.publish_service_capabilities(context)
LOG.info(_LI("Delete consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': add_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'status': add_vol_ref['status'],
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(add_vol_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': remove_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref['id'],
'group_id': group.id,
'status': remove_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if (model_update['status'] ==
fields.ConsistencyGroupStatus.ERROR):
msg = (_('Error occurred when updating consistency group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
group.status = 'available'
group.updated_at = now
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group.id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
self._notify_about_consistencygroup_usage(
context, group, "update.end")
LOG.info(_LI("Update consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
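# NOTE: The add-volume check above compares hosts at the backend level.
# A minimal sketch of that comparison, assuming vol_utils.extract_host()
# strips the pool suffix by default (host names are illustrative):
#
#     vol_utils.extract_host('myhost@lvm#pool1')   # -> 'myhost@lvm'
#     vol_utils.extract_host('myhost@lvm')         # -> 'myhost@lvm'
#
# so a volume living on 'myhost@lvm#pool1' counts as local to a manager
# whose self.host is 'myhost@lvm'.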
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
self.db.snapshot_update(context,
snap_model['id'],
snap_model)
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
# TODO(thangp): Switch over to use snapshot.update()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_update(
context, snapshot_id, {
'status': fields.SnapshotStatus.ERROR})
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'],
{'status': fields.SnapshotStatus.AVAILABLE,
'progress': '100%'})
cgsnapshot.status = 'available'
cgsnapshot.save()
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.end")
return cgsnapshot
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
project_id = cgsnapshot.project_id
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap.status = snap_model['status']
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
# TODO(thangp): Switch over to use snapshot.destroy()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
cgsnapshot.destroy()
LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id)
self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
"""Finalize migration process on backend device."""
model_update = None
model_update_default = {'_name_id': new_volume.name_id,
'provider_location':
new_volume.provider_location}
try:
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume,
volume_status)
except NotImplementedError:
# If update_migrated_volume is not implemented for the driver,
# _name_id and provider_location will be set with the values
# from new_volume.
model_update = model_update_default
if model_update:
model_update_default.update(model_update)
# Swap keys that were changed in the source so we keep their values
# in the temporary volume's DB record.
# Need to convert 'metadata' and 'admin_metadata' since
# they are not keys of volume, their corresponding keys are
# 'volume_metadata' and 'volume_admin_metadata'.
model_update_new = dict()
for key in model_update:
if key == 'metadata':
if volume.get('volume_metadata'):
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_metadata}
elif key == 'admin_metadata':
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_admin_metadata}
else:
model_update_new[key] = volume[key]
with new_volume.obj_as_admin():
new_volume.update(model_update_new)
new_volume.save()
with volume.obj_as_admin():
volume.update(model_update_default)
volume.save()
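# A small worked sketch of the finalize step above, assuming the driver
# returned no update so the default mapping is used (values hypothetical):
#
#     volume.provider_location     == 'loc-src'
#     new_volume.provider_location == 'loc-dst'
#
#     model_update_default == {'_name_id': new_volume.name_id,
#                              'provider_location': 'loc-dst'}
#     model_update_new     == {'_name_id': volume['_name_id'],
#                              'provider_location': 'loc-src'}
#
# so the source volume record ends up pointing at the migrated backend
# volume, while the temporary volume keeps the source's original values.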
# Replication V2.1 methods
def failover_host(self, context,
secondary_backend_id=None):
"""Failover a backend to a secondary replication target.
Instructs a replication capable/configured backend to fail over
to one of its secondary replication targets. host=None is
an acceptable input, and leaves it to the driver to fail over
to the only configured target, or to choose a target on its
own. All of the host's volumes will be passed on to the driver
in order for it to determine the replicated volumes on the host,
if needed.
:param context: security context
:param secondary_backend_id: Specifies backend_id to fail over to
"""
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
volumes = objects.VolumeList.get_all_by_host(context, self.host)
exception_encountered = False
try:
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
(active_backend_id, volume_update_list) = (
self.driver.failover_host(
context,
volumes,
secondary_id=secondary_backend_id))
except exception.UnableToFailOver:
LOG.exception(_LE("Failed to perform replication failover"))
service.replication_status = (
fields.ReplicationStatus.FAILOVER_ERROR)
service.save()
exception_encountered = True
except exception.InvalidReplicationTarget:
LOG.exception(_LE("Invalid replication target specified "
"for failover"))
# Preserve the replication_status
if secondary_backend_id == "default":
service.replication_status = (
fields.ReplicationStatus.FAILED_OVER)
else:
service.replication_status = fields.ReplicationStatus.ENABLED
service.save()
exception_encountered = True
except exception.VolumeDriverException:
# NOTE(jdg): Drivers need to be aware that if they fail during
# a failover sequence, we expect them to clean up and make
# sure the driver state is such that the original backend is
# still set as primary, as far as the driver's memory goes.
LOG.error(_LE("Driver reported error during "
"replication failover."))
service.status = 'error'
service.save()
exception_encountered = True
if exception_encountered:
LOG.error(
_LE("Error encountered during failover on host: "
"%(host)s invalid target ID %(backend_id)s"),
{'host': self.host, 'backend_id':
secondary_backend_id})
return
if secondary_backend_id == "default":
service.replication_status = fields.ReplicationStatus.ENABLED
service.active_backend_id = ""
if service.frozen:
service.disabled = True
service.disabled_reason = "frozen"
else:
service.disabled = False
service.disabled_reason = ""
service.save()
else:
service.replication_status = fields.ReplicationStatus.FAILED_OVER
service.active_backend_id = active_backend_id
service.disabled = True
service.disabled_reason = "failed-over"
service.save()
for update in volume_update_list:
# Response must include an id key: {volume_id: <cinder-uuid>}
if not update.get('volume_id'):
raise exception.UnableToFailOver(
reason=_("Update list, doesn't include volume_id"))
# Key things to consider (attaching failed-over volumes):
# provider_location
# provider_auth
# provider_id
# replication_status
vobj = objects.Volume.get_by_id(context, update['volume_id'])
vobj.update(update.get('updates', {}))
vobj.save()
LOG.info(_LI("Failed over to replication target successfully."))
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really
# need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning(_LW('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.'))
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info(_LI("Set backend status to frozen successfully."))
return True
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters; if this call
# to the backend fails, we're stuck and can't re-enable.
LOG.error(_LE('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.'))
return False
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info(_LI("Thawed backend successfully."))
return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
msg = _LE("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.")
LOG.exception(msg, {'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
def get_backup_device(self, ctxt, backup):
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
return backup_device_dict
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
|
thaw_host
|
UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder import compute
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
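# A hedged cinder.conf sketch for the extra_capabilities option above
# (section name and values are made up for illustration):
#
#     [backend_gold]
#     volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
#     extra_capabilities = {"service_level": "gold", "region": "eu-west"}
#
# The JSON string is parsed in VolumeManager.__init__ via
# jsonutils.loads(self.driver.configuration.extra_capabilities), so the
# key/value pairs can be used by the CapabilitiesFilter as described in
# the option help.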
MAPPING = {
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver',
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver',
'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver':
'cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver',
'cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver':
'cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver',
'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver':
'cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver',
'cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver':
'cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver',
}
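# Sketch of how the deprecation MAPPING above is applied (see
# VolumeManager.__init__ below; the configured path is illustrative):
#
#     volume_driver = 'cinder.volume.drivers.hds.nfs.HDSNFSDriver'
#     if volume_driver in MAPPING:
#         # a deprecation warning is logged, then:
#         volume_driver = MAPPING[volume_driver]
#     # -> 'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver'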
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
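# A brief usage sketch for the decorator above (it mirrors delete_volume
# further down in this module; the call flow described is illustrative):
#
#     @locked_volume_operation
#     def delete_volume(self, context, volume_id, ...):
#         ...
#
# Two concurrent calls for the same volume_id then serialize on the
# external lock named "<volume_id>-delete_volume", so e.g. deleting VolA
# blocks while "create volume VolB from VolA" holds the same named lock.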
def locked_detach_operation(f):
"""Lock decorator for volume detach operations.
Takes a named lock prior to executing the detach call. The lock is named
with the operation executed and the id of the volume. This lock can then
be used by other operations to avoid operation conflicts on shared volumes.
This locking mechanism is only for detach calls. We can't use the
locked_volume_operation, because detach requires an additional
attachment_id in the parameter list.
"""
def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def ldo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
return ldo_inner1
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
"""
def lso_inner1(inst, context, snapshot, **kwargs):
@utils.synchronized("%s-%s" % (snapshot.id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot, **kwargs)
return lso_inner1
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '2.0'
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment'}
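# Usage sketch for the skip-set above (this is how _clone_image_volume
# below builds the clone's field dict):
#
#     new_vol_values = {k: volume[k]
#                       for k in set(volume.keys()) -
#                       self._VOLUME_CLONE_SKIP_PROPERTIES}
#
# i.e. every volume field is copied except the listed ones, which are
# either regenerated or derived from the corresponding *_id fields.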
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
# if it's not using multi-backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
# We pass the current setting for service.active_backend_id to
# the driver on init, in case there was a restart or something
curr_active_backend_id = None
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
# NOTE(jdg): This is to solve problems with unit tests
LOG.info(_LI("Service not found for updating "
"active_backend_id, assuming default "
"for driver init."))
else:
curr_active_backend_id = service.active_backend_id
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning(_LW("Suppressing requests library SSL Warnings"))
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id)
self.message_api = message_api.API()
if CONF.profiler.enabled and profiler is not None:
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
if self.driver.configuration.safe_get(
'image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get(
'image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get(
'image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(
self.db,
cinder_volume.API(),
max_cache_size,
max_cache_entries
)
LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
{'host': self.host})
else:
LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
{'host': self.host})
self.image_volume_cache = None
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pools were introduced; ask the
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception(_LE('Fetch volume pool name failed.'),
resource=volume)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
# Otherwise, put them into a special fixed pool with
# volume_backend_name being the pool name; if
# volume_backend_name is None, use the default pool name.
# This is only for counting purposes and doesn't update the DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
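# Example of the resulting bookkeeping (numbers are illustrative):
#
#     self.stats == {
#         'allocated_capacity_gb': 30,
#         'pools': {'pool1': {'allocated_capacity_gb': 10},
#                   'pool2': {'allocated_capacity_gb': 20}},
#     }
#
# Each counted volume adds its size both to its pool entry and to the
# backend-wide total.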
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
# NOTE(jdg): For now this just updates provider_id. We can add
# more items to the update if they're relevant, but we need to
# be safe in what we allow and add a list of allowed keys;
# things that make sense are provider_*, replication_status, etc.
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
# NOTE(JDG): Make sure the returned item is in this host's volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']][0])
if update:
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
# NOTE(jdg): Snapshots are slightly harder, because
# we do not have a host column and of course no
# get-all-by-host, so we use a get_all and bounce our
# response off of it.
if snapshot_updates:
cinder_snaps = self.db.snapshot_get_all(ctxt)
for snap in cinder_snaps:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
def init_host(self):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
# Initialize backend capabilities list
self.driver.init_capabilities()
volumes = objects.VolumeList.get_all_by_host(ctxt, self.host)
snapshots = self.db.snapshot_get_by_host(ctxt, self.host)
self._sync_provider_info(ctxt, volumes, snapshots)
# FIXME volume count for exporting is wrong
try:
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
for volume in volumes:
# Available volumes should also be counted as allocated.
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception:
LOG.exception(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
volume.status = 'error'
volume.save()
elif volume['status'] in ('downloading', 'creating'):
LOG.warning(_LW("Detected volume stuck "
"in %(curr_status)s "
"status, setting to ERROR."),
{'curr_status': volume['status']},
resource=volume)
if volume['status'] == 'downloading':
self.driver.clear_download(ctxt, volume)
volume.status = 'error'
volume.save()
elif volume.status == 'uploading':
# Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment(
ctxt, volume.id)
else:
pass
snapshots = objects.SnapshotList.get_by_host(
ctxt, self.host, {'status': fields.SnapshotStatus.CREATING})
for snapshot in snapshots:
LOG.warning(_LW("Detected snapshot stuck in creating "
"status, setting to ERROR."), resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
resource=volume)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
# NOTE(jdg): Careful though because that doesn't mean
# that an entry exists in the service table
self.driver.set_initialized()
for volume in volumes:
if volume['status'] == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
volume['id'], volume=volume)
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume['id'], volume=volume)
LOG.info(_LI("Resume volume delete completed successfully."),
resource=volume)
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
LOG.info(_LI("Driver initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def init_host_with_rpc(self):
LOG.info(_LI("Initializing RPC dependent components of volume "
"driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
stats = self.driver.get_volume_stats(refresh=True)
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Service not found for updating "
"replication_status."))
if service.replication_status != (
fields.ReplicationStatus.FAILED_OVER):
if stats and stats.get('replication_enabled', False):
service.replication_status = fields.ReplicationStatus.ENABLED
else:
service.replication_status = fields.ReplicationStatus.DISABLED
service.save()
LOG.info(_LI("Driver post RPC initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def is_working(self):
"""Return if Manager is ready to accept requests.
This is to inform Service class that in case of volume driver
initialization failure the manager is actually down and not ready to
accept any requests.
"""
return self.driver.initialized
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
volume=None):
"""Creates the volume."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
if request_spec is None:
request_spec = {}
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume.id,
allow_reschedule,
context,
request_spec,
filter_properties,
image_volume_cache=self.image_volume_cache,
)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
source_replicaid = request_spec.get('source_replicaid')
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
def _run_flow():
# This code executes create volume flow. If something goes wrong,
# flow reverts all job that was done and reraises an exception.
# Otherwise, all data that was generated by flow becomes available
# in flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
@utils.synchronized(locked_action, external=True)
def _run_flow_locked():
_run_flow()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
vol_ref = None
try:
if locked_action is None:
_run_flow()
else:
_run_flow_locked()
finally:
try:
vol_ref = flow_engine.storage.fetch('volume_ref')
except tfe.NotFound:
# If there's no vol_ref, then the flow was reverted. Let's check
# whether rescheduling occurred.
try:
rescheduled = flow_engine.storage.get_revert_result(
create_volume.OnFailureRescheduleTask.make_name(
[create_volume.ACTION]))
except tfe.NotFound:
pass
if not rescheduled:
if not vol_ref:
# Flow was reverted and not rescheduled, fetching
# volume_ref from the DB, because it will be needed.
vol_ref = objects.Volume.get_by_id(context, volume.id)
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(vol_ref)
LOG.info(_LI("Created volume successfully."), resource=vol_ref)
return vol_ref.id
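# Lock-naming sketch for the create flow above (ids are hypothetical):
#
#     snapshot_id  = 'aaaa...'  -> locked_action == 'aaaa...-delete_snapshot'
#     source_volid = 'bbbb...'  -> locked_action == 'bbbb...-delete_volume'
#
# These match the names taken by the locked_snapshot_operation /
# locked_volume_operation decorators on delete_snapshot and delete_volume,
# so the source cannot be deleted while the new volume is being created.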
@locked_volume_operation
def delete_volume(self, context, volume_id,
unmanage_only=False,
volume=None,
cascade=False):
"""Deletes and unexports volume.
1. Delete a volume (normal case)
Delete a volume and update quotas.
2. Delete a migration volume
If deleting the volume in a migration, we want to skip
quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
volume = objects.Volume.get_by_id(context, volume_id)
else:
volume.refresh()
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.debug("Attempted delete of non-existent volume: %s", volume_id)
return
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if vol_utils.extract_host(volume.host) != self.host:
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
if unmanage_only and cascade:
# This could be done, but is ruled out for now just
# for simplicity.
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
is_migrating = volume.migration_status not in (None, 'error',
'success')
is_migrating_dest = (is_migrating and
volume.migration_status.startswith(
'target:'))
self._notify_about_volume_usage(context, volume, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != 'deleting':
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
LOG.error(_LE("Unable to delete busy volume."),
resource=volume)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'available')
return
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume."),
resource=volume)
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
volume.destroy()
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
self._notify_about_volume_usage(context, volume, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
size = volume.size
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
LOG.info(_LI("Deleted volume successfully."), resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
def create_snapshot(self, context, volume_id, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save()
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, volume_id)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating snapshot"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id}, resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info(_LI("Create snapshot completed successfully"),
resource=snapshot)
return snapshot.id
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot, unmanage_only=False):
"""Deletes and unexports snapshot."""
context = context.elevated()
snapshot._context = context
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
if unmanage_only:
self.driver.unmanage_snapshot(snapshot)
else:
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
resource=snapshot)
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
return
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Update snapshot usages failed."),
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy()
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
LOG.info(_LI("Delete snapshot completed successfully"),
resource=snapshot)
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached."""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
raise exception.InvalidVolume(
reason=_("volume is already attached"))
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachments = \
self.db.volume_attachment_get_all_by_instance_uuid(
context, volume_id, instance_uuid)
else:
attachments = (
self.db.volume_attachment_get_all_by_host(
context,
volume_id,
host_name_sanitized))
if attachments:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
return
self._notify_about_volume_usage(context, volume,
"attach.start")
values = {'volume_id': volume_id,
'attach_status': 'attaching', }
attachment = self.db.volume_attach(context.elevated(), values)
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(), volume_id,
{"attached_mode": mode}, False)
attachment_id = attachment['id']
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_attachment_update(context, attachment_id,
{'attach_status':
'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
self.message_api.create(
context, defined_messages.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Attaching volume %(volume_id)s to instance '
'%(instance)s at mountpoint %(mount)s on host '
'%(host)s.',
{'volume_id': volume_id, 'instance': instance_uuid,
'mount': mountpoint, 'host': host_name_sanitized},
resource=volume)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_id,
{'attach_status': 'error_attaching'})
volume = self.db.volume_attached(context.elevated(),
attachment_id,
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return self.db.volume_attachment_get(context, attachment_id)
return do_attach()
@locked_detach_operation
def detach_volume(self, context, volume_id, attachment_id=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
volume = self.db.volume_get(context, volume_id)
attachment = None
if attachment_id:
try:
attachment = self.db.volume_attachment_get(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
# We need to make sure the volume status is set to the correct
# status. It could be in detaching status now, and we don't
# want to leave it there.
self.db.volume_detached(context, volume_id, attachment_id)
return
else:
# We can try to degrade gracefully here by detaching
# a volume without the attachment_id if the volume only has
# one attachment. This is for backwards compatibility.
attachments = self.db.volume_attachment_get_all_by_volume_id(
context, volume_id)
if len(attachments) > 1:
# There are more than 1 attachments for this volume
# we have to have an attachment id.
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
# there aren't any attachments for this volume.
# so set the status to available and move on.
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
self.db.volume_update(context, volume_id,
{'status': 'available',
'attach_status': 'detached'})
return
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Detaching volume %(volume_id)s from instance '
'%(instance)s.',
{'volume_id': volume_id,
'instance': attachment.get('instance_uuid')},
resource=volume)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
# NOTE(jdg): We used to do an ensure_export here to
# catch upgrades while volumes were attached (E->F);
# this was necessary to convert in-use volumes from
# int IDs to UUIDs. We don't need this any longer.
# We're going to remove the export here
# (delete the iscsi target).
volume = self.db.volume_get(context, volume_id)
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Detach volume failed, due to "
"uninitialized driver."),
resource=volume)
except Exception as ex:
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
"""Create a new image-volume and cache entry for it.
This assumes that the image has already been downloaded and stored
in the volume described by the volume_ref.
"""
image_volume = None
try:
if not self.image_volume_cache.ensure_space(
ctx,
volume_ref['size'],
volume_ref['host']):
LOG.warning(_LW('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on host %(host)s.'),
{'image': image_id, 'host': volume_ref['host']})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning(_LW('Unable to clone image_volume for image '
'%(image_id)s will not create cache entry.'),
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning(_LW('Failed to create new image-volume cache entry.'
' Error: %(exception)s'), {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume.id)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) -
self._VOLUME_CLONE_SKIP_PROPERTIES}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = 'detached'
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
LOG.exception(_LE('Create clone_image_volume: %(volume_id)s '
'for image %(image_id)s '
'failed (Exception: %(except)s)'),
{'volume_id': volume.id,
'image_id': image_meta['id'],
'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume.id,
allow_reschedule=False, volume=image_volume)
image_volume = self.db.volume_get(ctx, image_volume.id)
if image_volume.status != 'available':
raise exception.InvalidVolume(
reason=_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.'),
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume.id)
except exception.CinderException:
LOG.exception(_LE('Could not delete the image volume %(id)s.'),
{'id': volume.id})
return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
image_meta):
"""Create a cloned volume and register its location to the image."""
if (image_meta['disk_format'] != 'raw' or
image_meta['container_format'] != 'bare'):
return False
image_volume_context = ctx
if self.driver.configuration.image_upload_use_internal_tenant:
internal_ctx = context.get_internal_tenant_context()
if internal_ctx:
image_volume_context = internal_ctx
image_volume = self._clone_image_volume(image_volume_context,
volume,
image_meta)
if not image_volume:
return False
uri = 'cinder://%s' % image_volume.id
image_registered = None
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception(_LE('Failed to register image volume location '
'%(uri)s.'), {'uri': uri})
if not image_registered:
LOG.warning(_LW('Registration of image volume URI %(uri)s '
'to image %(image_id)s failed.'),
{'uri': uri, 'image_id': image_meta['id']})
try:
self.delete_volume(image_volume_context, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete failed image volume '
'%(id)s.'), {'id': image_volume.id})
return False
image_volume_meta = {'glance_image_id': image_meta['id'],
'image_owner': ctx.project_id}
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
return True
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = self.db.volume_get(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error(_LE("Upload volume to image encountered an error "
"(image-id: %(image_id)s)."),
{'image_id': image_meta['id']},
resource=volume)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
if isinstance(error, exception.ImageLimitExceeded):
self.message_api.create(
context, defined_messages.IMAGE_FROM_VOLUME_OVER_QUOTA,
context.project_id,
resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info(_LI("Copy volume to image completed successfully."),
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume = self.db.volume_get(context, volume_id)
model_update = None
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume, connector)
except exception.CinderException:
err_msg = (_("Create export for volume failed."))
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume = self.db.volume_update(context,
volume_id,
model_update)
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
# Add qos_specs to connection info
typeid = volume['volume_type_id']
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
            # Only pass qos_specs that are designated to be consumed by the
            # front-end, or by both front-end and back-end.
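            # Hypothetical example of a qos_specs dict that would pass the
            # check below:
            #   qos = {'consumer': 'front-end',
            #          'specs': {'total_iops_sec': '500'}}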
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.get('encryption_key_id'))
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume_ref)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Remove volume export completed successfully."),
resource=volume_ref)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
        # NOTE(jdg): Some drivers tie provider info (CHAP) to the tenant;
        # for those that do, allow them to return updated model info.
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Update volume model for "
"transfer operation failed."),
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume_ref)
return model_update
def _connect_device(self, conn):
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _attach_volume(self, ctxt, volume, properties, remote=False):
status = volume['status']
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
try:
conn = rpcapi.initialize_connection(ctxt, volume, properties)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume %(vol)s."),
{'vol': volume['id']})
self.db.volume_update(ctxt, volume['id'],
{'status': status})
else:
conn = self.initialize_connection(ctxt, volume['id'], properties)
return self._connect_device(conn)
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False):
connector = attach_info['connector']
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Unable to terminate volume connection: '
                                  '%(err)s.'), {'err': err})
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
{'src': src_vol['name'], 'dest': dest_vol['name']})
properties = utils.brick_get_connector_properties()
dest_remote = remote in ['dest', 'both']
dest_attach_info = self._attach_volume(ctxt, dest_vol, properties,
remote=dest_remote)
try:
src_remote = remote in ['src', 'both']
src_attach_info = self._attach_volume(ctxt, src_vol, properties,
remote=src_remote)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach source volume for copy."))
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, remote=dest_remote)
# Check the backend capabilities of migration destination host.
rpcapi = volume_rpcapi.VolumeAPI()
capabilities = rpcapi.get_capabilities(ctxt, dest_vol['host'],
False)
sparse_copy_volume = bool(capabilities and
capabilities.get('sparse_copy_volume',
False))
copy_error = True
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
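            # e.g. a 5 GB source volume -> 5 * 1024 = 5120 MB passed to
            # copy_volume (units.Ki == 1024).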
vol_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
try:
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, force=copy_error,
remote=dest_remote)
finally:
self._detach_volume(ctxt, src_attach_info, src_vol,
properties, force=copy_error,
remote=src_remote)
def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
skip = self._VOLUME_CLONE_SKIP_PROPERTIES | {'host'}
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
new_volume = objects.Volume(
context=ctxt,
host=host['host'],
status='creating',
attach_status='detached',
migration_status='target:%s' % volume['id'],
**new_vol_values
)
new_volume.create()
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None, allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
# TODO(thangp): Replace get_by_id with refresh when it is available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
tries = 0
while new_volume.status != 'available':
tries += 1
now = time.time()
if new_volume.status == 'error':
msg = _("failed to create new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
time.sleep(tries ** 2)
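                # Quadratic backoff while polling: wait 1, 4, 9, ... seconds
                # between status checks until the deadline is reached.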
# TODO(thangp): Replace get_by_id with refresh when it is
# available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
# Copy the source volume to the destination volume
try:
attachments = volume.volume_attachment
if not attachments:
# Pre- and post-copy driver-specific actions
self.driver.before_volume_copy(ctxt, volume, new_volume,
remote='dest')
self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
self.driver.after_volume_copy(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume.id,
new_volume.id,
error=False,
volume=volume,
new_volume=new_volume)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume.id,
new_volume.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE(
"Failed to copy volume %(vol1)s to %(vol2)s"), {
'vol1': volume.id, 'vol2': new_volume.id})
self._clean_temporary_volume(ctxt, volume,
new_volume)
def _clean_temporary_volume(self, ctxt, volume, new_volume,
clean_db_only=False):
# If we're in the migrating phase, we need to cleanup
# destination volume because source volume is remaining
if volume.migration_status == 'migrating':
try:
if clean_db_only:
                    # The temporary volume was never created on the backend;
                    # only its DB record exists, so destroying the record is
                    # enough.
new_volume.destroy()
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.delete_volume(ctxt, new_volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume.id})
else:
# If we're in the completing phase don't delete the
# destination because we may have already deleted the
            # source! But the migration_status in the database should
            # be cleared so the volume can be handled after the migration
            # failure.
try:
new_volume.migration_status = None
new_volume.save()
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume.id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume.id})
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False, volume=None, new_volume=None):
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None or new_volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
new_volume = objects.Volume.get_by_id(ctxt, new_volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume.id, 'vol2': new_volume.id})
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = volume.previous_status
if error:
LOG.info(_LI("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s"),
{'vol1': volume['id'], 'vol2': new_volume.id})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': 'error',
'status': orig_volume_status}
volume.update(updates)
volume.save()
return volume.id
volume.migration_status = 'completing'
volume.save()
# Detach the source volume (if it fails, don't fail the migration)
try:
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
self.detach_volume(ctxt, volume.id, attachment['id'])
except Exception as ex:
LOG.error(_LE("Detach migration source volume failed: %(err)s"),
{'err': ex}, resource=volume)
# Give driver (new_volume) a chance to update things as needed
# after a successful migration.
# Note this needs to go through rpc to the host of the new volume
# the current host and driver object is for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
volume.refresh()
new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
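        # After the swap, `volume` (keeping the original id) refers to the
        # migrated data, while `updated_new` refers to the old source volume,
        # which is deleted asynchronously further below.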
updated_new = volume.finish_volume_migration(new_volume)
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
rpcapi.attach_volume(ctxt, volume,
attachment['instance_uuid'],
attachment['attached_host'],
attachment['mountpoint'],
'rw')
volume.update(updates)
volume.save()
# Asynchronous deletion of the source volume in the back-end (now
# pointed by the target volume id)
try:
rpcapi.delete_volume(ctxt, updated_new)
except Exception as ex:
LOG.error(_LE('Failed to request async delete of migration source '
'vol %(vol)s: %(err)s'),
{'vol': volume.id, 'err': ex})
LOG.info(_LI("Complete-Migrate volume completed successfully."),
resource=volume)
return volume.id
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
new_type_id=None, volume=None):
"""Migrate the volume to the specified host (called on source host)."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
model_update = None
moved = False
status_update = None
if volume.status in ('retyping', 'maintenance'):
status_update = {'status': volume.previous_status}
volume.migration_status = 'migrating'
volume.save()
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume)
moved, model_update = self.driver.migrate_volume(ctxt,
volume,
host)
if moved:
updates = {'host': host['host'],
'migration_status': 'success',
'previous_status': volume.status}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume.update(updates)
volume.save()
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
if not moved:
try:
self._migrate_volume_generic(ctxt, volume, host,
new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
LOG.info(_LI("Migrate volume completed successfully."),
resource=volume)
@periodic_task.periodic_task
def _report_driver_status(self, context):
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW("Update driver status failed: %(config_group)s "
"is uninitialized."),
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# Append filter and goodness function if needed
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
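        # Drivers reporting per-pool stats return something like (shape only,
        # values hypothetical):
        #   vol_stats['pools'] = [{'pool_name': 'pool-1',
        #                          'total_capacity_gb': 100, ...}]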
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume_id, new_size, reservations,
volume=None):
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.status = 'error_extending'
volume.save()
project_id = volume.project_id
size_increase = (int(new_size)) - volume.size
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except Exception:
LOG.exception(_LE("Extend volume failed."),
resource=volume)
try:
self.db.volume_update(context, volume.id,
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume.id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
QUOTAS.commit(context, reservations, project_id=project_id)
volume.update({'size': int(new_size), 'status': 'available'})
volume.save()
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info(_LI("Extend volume completed successfully."),
resource=volume)
def retype(self, ctxt, volume_id, new_type_id, host,
migration_policy='never', reservations=None,
volume=None, old_reservations=None):
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
context = ctxt.elevated()
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
status_update = {'status': volume.previous_status}
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
                # here? Setting the volume back to its original status
# for now.
volume.update(status_update)
volume.save()
# If old_reservations has been passed in from the API, we should
# skip quotas.
# TODO(ntpttr): These reservation checks are left in to be backwards
# compatible with Liberty and can be removed in N.
if not old_reservations:
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
                # NOTE(wanghao): We don't need to reserve volumes and gigabytes
                # quota for the retype operation since they don't change;
                # reserving only the per-volume-type counts and gigabytes is
                # fine.
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
volume.update(status_update)
volume.save()
msg = _("Failed to update quota usage while retyping volume.")
LOG.exception(msg, resource=volume)
raise exception.CinderException(msg)
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context, volume.volume_type_id, new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
# NOTE(jdg): Check to see if the destination host is the same
# as the current. If it's not don't call the driver.retype
# method, otherwise drivers that implement retype may report
# success, but it's invalid in the case of a migrate.
# We assume that those that support pools do this internally
# so we strip off the pools designation
if (not retyped and
vol_utils.hosts_are_equivalent(self.driver.host,
host['host'])):
try:
new_type = volume_types.get_volume_type(context, new_type_id)
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
                if isinstance(ret, tuple):
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume.id)
except Exception:
retyped = False
LOG.exception(_LE("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism."), volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume.id, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info(_LI("Retype volume completed successfully."),
resource=volume)
def manage_existing(self, ctxt, volume_id, ref=None):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume_id,
ref)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
# Update volume stats
pool = vol_utils.extract_host(vol_ref['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol_ref['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= vol_ref['size']
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol_ref['size'])
LOG.info(_LI("Manage existing volume completed successfully."),
resource=vol_ref)
return vol_ref['id']
def promote_replica(self, ctxt, volume_id):
"""Promote volume replica secondary to be the primary volume."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Promote volume replica failed."),
resource=volume)
try:
model_update = self.driver.promote_replica(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error promoting secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
LOG.info(_LI("Promote volume replica completed successfully."),
resource=volume)
def reenable_replication(self, ctxt, volume_id):
"""Re-enable replication of secondary volume with primary volumes."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Sync volume replica failed."),
resource=volume)
try:
model_update = self.driver.reenable_replication(ctxt, volume)
except exception.CinderException:
err_msg = (_("Synchronizing secondary volume to primary failed."))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def _update_replication_relationship_status(self, ctxt):
# Only want volumes that do not have a 'disabled' replication status
filters = {'replication_status': ['active', 'copying', 'error',
'active-stopped', 'inactive']}
volumes = self.db.volume_get_all_by_host(ctxt, self.host,
filters=filters)
for vol in volumes:
model_update = None
try:
model_update = self.driver.get_replication_status(
ctxt, vol)
if model_update:
self.db.volume_update(ctxt, vol['id'], model_update)
except Exception:
LOG.exception(_LE("Get replication status for volume failed."),
resource=vol)
def create_consistencygroup(self, context, group):
"""Creates the consistency group."""
context = context.elevated()
status = fields.ConsistencyGroupStatus.AVAILABLE
model_update = None
self._notify_about_consistencygroup_usage(
context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %s: creating"), group.name)
model_update = self.driver.create_consistencygroup(context,
group)
if model_update:
if (model_update['status'] ==
fields.ConsistencyGroupStatus.ERROR):
msg = (_('Create consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.ConsistencyGroupStatus.ERROR
group.save()
LOG.error(_LE("Consistency group %s: create failed"),
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info(_LI("Consistency group %s: created successfully"),
group.name)
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
return group
def create_consistencygroup_from_src(self, context, group,
cgsnapshot=None, source_cg=None):
"""Creates the consistency group from source.
The source can be a CG snapshot or a source CG.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = self.db.volume_get_all_by_group(context, group.id)
if cgsnapshot:
try:
# Check if cgsnapshot still exists
cgsnapshot = objects.CGSnapshot.get_by_id(
context, cgsnapshot.id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Create consistency group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': cgsnapshot.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("snapshot-%s") % cgsnapshot.id
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
if source_cg:
try:
source_cg = objects.ConsistencyGroup.get_by_id(
context, source_cg.id)
except exception.ConsistencyGroupNotFound:
LOG.error(_LE("Create consistency group "
"from source cg-%(cg)s failed: "
"ConsistencyGroupNotFound."),
{'cg': source_cg.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("cg-%s") % source_cg.id
source_vols = self.db.volume_get_all_by_group(
context, source_cg.id)
for source_vol in source_vols:
if (source_vol['status'] not in
VALID_CREATE_CG_SRC_CG_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol['id'],
'valid': VALID_CREATE_CG_SRC_CG_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if cgsnapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_cg and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_consistencygroup_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create consistency group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
return group
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
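        # Illustrative example: target volumes with snapshot_id values
        # [s2, s1] yield sorted_snapshots [snap-s2, snap-s1], i.e. the
        # snapshots are reordered to match their target volumes.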
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error(_LE("Source volumes cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
try:
snapshot_id = vol.get('snapshot_id')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
def delete_consistencygroup(self, context, group):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
        if context.project_id != group.project_id:
            project_id = group.project_id
        else:
            project_id = context.project_id
volumes = self.db.volume_get_all_by_group(context, group.id)
for volume_ref in volumes:
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_ref['id'])
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
if volume_ref['host']:
new_host = vol_utils.extract_host(volume_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node"))
self._notify_about_consistencygroup_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, group, volumes))
if volumes_model_update:
for volume in volumes_model_update:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
for volume_ref in volumes:
# Get reservations for volume
try:
volume_id = volume_ref['id']
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume_ref['size']
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
group.destroy()
self._notify_about_consistencygroup_usage(
context, group, "delete.end", volumes)
self.publish_service_capabilities(context)
LOG.info(_LI("Delete consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': add_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'status': add_vol_ref['status'],
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(add_vol_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': remove_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref['id'],
'group_id': group.id,
'status': remove_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in (
[fields.ConsistencyGroupStatus.ERROR]):
msg = (_('Error occurred when updating consistency group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
group.status = 'available'
        group.updated_at = now
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group.id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
self._notify_about_consistencygroup_usage(
context, group, "update.end")
LOG.info(_LI("Update consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
self.db.snapshot_update(context,
snap_model['id'],
snap_model)
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
# TODO(thangp): Switch over to use snapshot.update()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_update(
context, snapshot_id, {
'status': fields.SnapshotStatus.ERROR})
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'],
{'status': fields.SnapshotStatus.AVAILABLE,
'progress': '100%'})
cgsnapshot.status = 'available'
cgsnapshot.save()
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.end")
return cgsnapshot
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
project_id = cgsnapshot.project_id
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap.status = snap_model['status']
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
# TODO(thangp): Switch over to use snapshot.destroy()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
cgsnapshot.destroy()
LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id)
self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
"""Finalize migration process on backend device."""
model_update = None
model_update_default = {'_name_id': new_volume.name_id,
'provider_location':
new_volume.provider_location}
try:
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume,
volume_status)
except NotImplementedError:
# If update_migrated_volume is not implemented for the driver,
# _name_id and provider_location will be set with the values
# from new_volume.
model_update = model_update_default
if model_update:
model_update_default.update(model_update)
# Swap keys that were changed in the source so we keep their values
# in the temporary volume's DB record.
# Need to convert 'metadata' and 'admin_metadata' since
# they are not keys of volume, their corresponding keys are
# 'volume_metadata' and 'volume_admin_metadata'.
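        # Illustrative sketch (not from the original code): if the driver
        # returned {'provider_location': 'new-loc'}, the loop below records
        # the source volume's current provider_location in model_update_new
        # for the temporary volume, while the source volume record later
        # receives 'new-loc' via model_update_default.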
model_update_new = dict()
for key in model_update:
if key == 'metadata':
if volume.get('volume_metadata'):
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_metadata}
elif key == 'admin_metadata':
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_admin_metadata}
else:
model_update_new[key] = volume[key]
with new_volume.obj_as_admin():
new_volume.update(model_update_new)
new_volume.save()
with volume.obj_as_admin():
volume.update(model_update_default)
volume.save()
# Replication V2.1 methods
def failover_host(self, context,
secondary_backend_id=None):
"""Failover a backend to a secondary replication target.
Instructs a replication capable/configured backend to failover
        to one of its secondary replication targets. host=None is
        an acceptable input, and leaves it to the driver to failover
        to the only configured target, or to choose a target on its
        own. All of the host's volumes will be passed on to the driver
in order for it to determine the replicated volumes on the host,
if needed.
:param context: security context
:param secondary_backend_id: Specifies backend_id to fail over to
"""
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
volumes = objects.VolumeList.get_all_by_host(context, self.host)
exception_encountered = False
try:
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
(active_backend_id, volume_update_list) = (
self.driver.failover_host(
context,
volumes,
secondary_id=secondary_backend_id))
except exception.UnableToFailOver:
LOG.exception(_LE("Failed to perform replication failover"))
service.replication_status = (
fields.ReplicationStatus.FAILOVER_ERROR)
service.save()
exception_encountered = True
except exception.InvalidReplicationTarget:
LOG.exception(_LE("Invalid replication target specified "
"for failover"))
# Preserve the replication_status
if secondary_backend_id == "default":
service.replication_status = (
fields.ReplicationStatus.FAILED_OVER)
else:
service.replication_status = fields.ReplicationStatus.ENABLED
service.save()
exception_encountered = True
except exception.VolumeDriverException:
            # NOTE(jdg): Drivers need to be aware that if they fail during
            # a failover sequence, we expect them to clean up
            # and make sure the driver state is such that the original
            # backend is still set as primary, as per driver memory.
LOG.error(_LE("Driver reported error during "
"replication failover."))
service.status = 'error'
service.save()
exception_encountered = True
if exception_encountered:
LOG.error(
_LE("Error encountered during failover on host: "
"%(host)s invalid target ID %(backend_id)s"),
{'host': self.host, 'backend_id':
secondary_backend_id})
return
if secondary_backend_id == "default":
service.replication_status = fields.ReplicationStatus.ENABLED
service.active_backend_id = ""
if service.frozen:
service.disabled = True
service.disabled_reason = "frozen"
else:
service.disabled = False
service.disabled_reason = ""
service.save()
else:
service.replication_status = fields.ReplicationStatus.FAILED_OVER
service.active_backend_id = active_backend_id
service.disabled = True
service.disabled_reason = "failed-over"
service.save()
for update in volume_update_list:
# Response must include an id key: {volume_id: <cinder-uuid>}
if not update.get('volume_id'):
raise exception.UnableToFailOver(
reason=_("Update list, doesn't include volume_id"))
# Key things to consider (attaching failed-over volumes):
# provider_location
# provider_auth
# provider_id
# replication_status
vobj = objects.Volume.get_by_id(context, update['volume_id'])
vobj.update(update.get('updates', {}))
vobj.save()
LOG.info(_LI("Failed over to replication target successfully."))
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really
# need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning(_LW('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.'))
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info(_LI("Set backend status to frozen successfully."))
return True
# MASKED: thaw_host function (lines 3359-3390)
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
msg = _LE("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.")
LOG.exception(msg, {'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
def get_backup_device(self, ctxt, backup):
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
return backup_device_dict
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
|
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters, if this call
# to the backend fails, we're stuck and can't re-enable
LOG.error(_LE('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.'))
return False
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info(_LI("Thawed backend successfully."))
return True
| 3,359 | 3,390 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, and detaching volumes, as well as persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder import compute
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
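# Illustrative sketch (not from the original code): these options are read
# from the per-backend section of cinder.conf named after the service, e.g.
#
#   [lvm-1]
#   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
#   extra_capabilities = {"service_level": "gold"}
#
# via Configuration(volume_manager_opts, config_group=service_name) in
# VolumeManager.__init__ below.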
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver',
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver',
'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver':
'cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver',
'cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver':
'cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver',
'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver':
'cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver',
'cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver':
'cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver',
}
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
def locked_detach_operation(f):
"""Lock decorator for volume detach operations.
Takes a named lock prior to executing the detach call. The lock is named
with the operation executed and the id of the volume. This lock can then
be used by other operations to avoid operation conflicts on shared volumes.
This locking mechanism is only for detach calls. We can't use the
locked_volume_operation, because detach requires an additional
attachment_id in the parameter list.
"""
def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def ldo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
return ldo_inner1
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
"""
def lso_inner1(inst, context, snapshot, **kwargs):
@utils.synchronized("%s-%s" % (snapshot.id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot, **kwargs)
return lso_inner1
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '2.0'
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment'}
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
# if its not using the multi backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
# We pass the current setting for service.active_backend_id to
        # the driver on init, in case there was a restart or something
curr_active_backend_id = None
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
# NOTE(jdg): This is to solve problems with unit tests
LOG.info(_LI("Service not found for updating "
"active_backend_id, assuming default "
"for driver init."))
else:
curr_active_backend_id = service.active_backend_id
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning(_LW("Suppressing requests library SSL Warnings"))
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id)
self.message_api = message_api.API()
if CONF.profiler.enabled and profiler is not None:
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
if self.driver.configuration.safe_get(
'image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get(
'image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get(
'image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(
self.db,
cinder_volume.API(),
max_cache_size,
max_cache_entries
)
LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
{'host': self.host})
else:
LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
{'host': self.host})
self.image_volume_cache = None
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
            # No pool name encoded in host, so this is a legacy
            # volume created before pools were introduced; ask the
            # driver to provide pool info if it has such
            # knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception(_LE('Fetch volume pool name failed.'),
resource=volume)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
            # Otherwise, put them into a special fixed pool with
            # volume_backend_name being the pool name; if
            # volume_backend_name is None, use the default pool name.
            # This is only for counting purposes and doesn't update the DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
        # NOTE(jdg): For now this just updates provider_id; we can add more
        # items to the update if they're relevant, but we need to be safe in
        # what we allow and add a list of allowed keys. Things that make
        # sense are provider_*, replication_status, etc.
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
                # NOTE(JDG): Make sure returned item is in this host's volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']][0])
if update:
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
        # NOTE(jdg): snapshots are slightly harder, because
# we do not have a host column and of course no get
# all by host, so we use a get_all and bounce our
# response off of it
if snapshot_updates:
cinder_snaps = self.db.snapshot_get_all(ctxt)
for snap in cinder_snaps:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
def init_host(self):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
# Initialize backend capabilities list
self.driver.init_capabilities()
volumes = objects.VolumeList.get_all_by_host(ctxt, self.host)
snapshots = self.db.snapshot_get_by_host(ctxt, self.host)
self._sync_provider_info(ctxt, volumes, snapshots)
# FIXME volume count for exporting is wrong
try:
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
for volume in volumes:
# available volume should also be counted into allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception:
LOG.exception(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
volume.status = 'error'
volume.save()
elif volume['status'] in ('downloading', 'creating'):
LOG.warning(_LW("Detected volume stuck "
"in %(curr_status)s "
"status, setting to ERROR."),
{'curr_status': volume['status']},
resource=volume)
if volume['status'] == 'downloading':
self.driver.clear_download(ctxt, volume)
volume.status = 'error'
volume.save()
elif volume.status == 'uploading':
# Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment(
ctxt, volume.id)
else:
pass
snapshots = objects.SnapshotList.get_by_host(
ctxt, self.host, {'status': fields.SnapshotStatus.CREATING})
for snapshot in snapshots:
LOG.warning(_LW("Detected snapshot stuck in creating "
"status, setting to ERROR."), resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
resource=volume)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
# NOTE(jdg): Careful though because that doesn't mean
# that an entry exists in the service table
self.driver.set_initialized()
for volume in volumes:
if volume['status'] == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
volume['id'], volume=volume)
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume['id'], volume=volume)
LOG.info(_LI("Resume volume delete completed successfully."),
resource=volume)
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
LOG.info(_LI("Driver initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def init_host_with_rpc(self):
LOG.info(_LI("Initializing RPC dependent components of volume "
"driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
stats = self.driver.get_volume_stats(refresh=True)
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Service not found for updating "
"replication_status."))
if service.replication_status != (
fields.ReplicationStatus.FAILED_OVER):
if stats and stats.get('replication_enabled', False):
service.replication_status = fields.ReplicationStatus.ENABLED
else:
service.replication_status = fields.ReplicationStatus.DISABLED
service.save()
LOG.info(_LI("Driver post RPC initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def is_working(self):
"""Return if Manager is ready to accept requests.
        This informs the Service class that, in case of a volume driver
        initialization failure, the manager is actually down and not ready to
        accept any requests.
"""
return self.driver.initialized
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
volume=None):
"""Creates the volume."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
if request_spec is None:
request_spec = {}
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume.id,
allow_reschedule,
context,
request_spec,
filter_properties,
image_volume_cache=self.image_volume_cache,
)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
source_replicaid = request_spec.get('source_replicaid')
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
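        # Descriptive note (not in the original code): these lock names mirror
        # the ones taken by the @locked_volume_operation and
        # @locked_snapshot_operation decorators on delete_volume() and
        # delete_snapshot(), so the source object cannot be deleted while this
        # volume is being created from it.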
def _run_flow():
# This code executes create volume flow. If something goes wrong,
# flow reverts all job that was done and reraises an exception.
# Otherwise, all data that was generated by flow becomes available
# in flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
@utils.synchronized(locked_action, external=True)
def _run_flow_locked():
_run_flow()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
vol_ref = None
try:
if locked_action is None:
_run_flow()
else:
_run_flow_locked()
finally:
try:
vol_ref = flow_engine.storage.fetch('volume_ref')
except tfe.NotFound:
                # If there's no vol_ref, then the flow was reverted. Let's
                # check whether rescheduling occurred.
try:
rescheduled = flow_engine.storage.get_revert_result(
create_volume.OnFailureRescheduleTask.make_name(
[create_volume.ACTION]))
except tfe.NotFound:
pass
if not rescheduled:
if not vol_ref:
# Flow was reverted and not rescheduled, fetching
# volume_ref from the DB, because it will be needed.
vol_ref = objects.Volume.get_by_id(context, volume.id)
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(vol_ref)
LOG.info(_LI("Created volume successfully."), resource=vol_ref)
return vol_ref.id
@locked_volume_operation
def delete_volume(self, context, volume_id,
unmanage_only=False,
volume=None,
cascade=False):
"""Deletes and unexports volume.
1. Delete a volume(normal case)
Delete a volume and update quotas.
2. Delete a migration volume
If deleting the volume in a migration, we want to skip
quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
volume = objects.Volume.get_by_id(context, volume_id)
else:
volume.refresh()
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.debug("Attempted delete of non-existent volume: %s", volume_id)
return
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if vol_utils.extract_host(volume.host) != self.host:
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
if unmanage_only and cascade:
# This could be done, but is ruled out for now just
# for simplicity.
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
is_migrating = volume.migration_status not in (None, 'error',
'success')
is_migrating_dest = (is_migrating and
volume.migration_status.startswith(
'target:'))
self._notify_about_volume_usage(context, volume, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != 'deleting':
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
LOG.error(_LE("Unable to delete busy volume."),
resource=volume)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'available')
return
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume."),
resource=volume)
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
volume.destroy()
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
self._notify_about_volume_usage(context, volume, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
size = volume.size
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
LOG.info(_LI("Deleted volume successfully."), resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
def create_snapshot(self, context, volume_id, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save()
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, volume_id)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating snapshot"
                                  " metadata using the provided volume's"
" %(volume_id)s metadata"),
{'volume_id': volume_id}, resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info(_LI("Create snapshot completed successfully"),
resource=snapshot)
return snapshot.id
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot, unmanage_only=False):
"""Deletes and unexports snapshot."""
context = context.elevated()
snapshot._context = context
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
if unmanage_only:
self.driver.unmanage_snapshot(snapshot)
else:
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
resource=snapshot)
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
return
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Update snapshot usages failed."),
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy()
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
LOG.info(_LI("Delete snapshot completed successfully"),
resource=snapshot)
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached."""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
raise exception.InvalidVolume(
reason=_("volume is already attached"))
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachments = \
self.db.volume_attachment_get_all_by_instance_uuid(
context, volume_id, instance_uuid)
else:
attachments = (
self.db.volume_attachment_get_all_by_host(
context,
volume_id,
host_name_sanitized))
if attachments:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
return
self._notify_about_volume_usage(context, volume,
"attach.start")
values = {'volume_id': volume_id,
'attach_status': 'attaching', }
attachment = self.db.volume_attach(context.elevated(), values)
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(), volume_id,
{"attached_mode": mode}, False)
attachment_id = attachment['id']
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_attachment_update(context, attachment_id,
{'attach_status':
'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
self.message_api.create(
context, defined_messages.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Attaching volume %(volume_id)s to instance '
'%(instance)s at mountpoint %(mount)s on host '
'%(host)s.',
{'volume_id': volume_id, 'instance': instance_uuid,
'mount': mountpoint, 'host': host_name_sanitized},
resource=volume)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_id,
{'attach_status': 'error_attaching'})
volume = self.db.volume_attached(context.elevated(),
attachment_id,
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return self.db.volume_attachment_get(context, attachment_id)
return do_attach()
@locked_detach_operation
def detach_volume(self, context, volume_id, attachment_id=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
volume = self.db.volume_get(context, volume_id)
attachment = None
if attachment_id:
try:
attachment = self.db.volume_attachment_get(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
# We need to make sure the volume status is set to the correct
# status. It could be in detaching status now, and we don't
# want to leave it there.
self.db.volume_detached(context, volume_id, attachment_id)
return
else:
            # We can try and degrade gracefully here by trying to detach
            # a volume without the attachment_id if the volume only has
            # one attachment. This is for backwards compatibility.
attachments = self.db.volume_attachment_get_all_by_volume_id(
context, volume_id)
if len(attachments) > 1:
                # There is more than one attachment for this volume,
                # so we have to have an attachment_id.
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
# there aren't any attachments for this volume.
# so set the status to available and move on.
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
self.db.volume_update(context, volume_id,
{'status': 'available',
'attach_status': 'detached'})
return
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Detaching volume %(volume_id)s from instance '
'%(instance)s.',
{'volume_id': volume_id,
'instance': attachment.get('instance_uuid')},
resource=volume)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
        # NOTE(jdg): We used to do an ensure export here to
        # catch upgrades while volumes were attached (E->F);
        # this was necessary to convert in-use volumes from
        # int IDs to UUIDs. We don't need this any longer, so
        # we're going to remove the export here
        # (delete the iscsi target).
volume = self.db.volume_get(context, volume_id)
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Detach volume failed, due to "
"uninitialized driver."),
resource=volume)
except Exception as ex:
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
"""Create a new image-volume and cache entry for it.
This assumes that the image has already been downloaded and stored
in the volume described by the volume_ref.
"""
image_volume = None
try:
if not self.image_volume_cache.ensure_space(
ctx,
volume_ref['size'],
volume_ref['host']):
LOG.warning(_LW('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on host %(host)s.'),
{'image': image_id, 'host': volume_ref['host']})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning(_LW('Unable to clone image_volume for image '
                                '%(image_id)s; will not create cache entry.'),
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning(_LW('Failed to create new image-volume cache entry.'
' Error: %(exception)s'), {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume.id)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) -
self._VOLUME_CLONE_SKIP_PROPERTIES}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = 'detached'
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
            LOG.exception(_LE('Create clone_image_volume: %(volume_id)s '
'for image %(image_id)s, '
'failed (Exception: %(except)s)'),
{'volume_id': volume.id,
'image_id': image_meta['id'],
'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume.id,
allow_reschedule=False, volume=image_volume)
image_volume = self.db.volume_get(ctx, image_volume.id)
if image_volume.status != 'available':
raise exception.InvalidVolume(_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.'),
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume.id)
except exception.CinderException:
LOG.exception(_LE('Could not delete the image volume %(id)s.'),
{'id': volume.id})
return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
image_meta):
"""Create a cloned volume and register its location to the image."""
if (image_meta['disk_format'] != 'raw' or
image_meta['container_format'] != 'bare'):
return False
image_volume_context = ctx
if self.driver.configuration.image_upload_use_internal_tenant:
internal_ctx = context.get_internal_tenant_context()
if internal_ctx:
image_volume_context = internal_ctx
image_volume = self._clone_image_volume(image_volume_context,
volume,
image_meta)
if not image_volume:
return False
uri = 'cinder://%s' % image_volume.id
image_registered = None
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception(_LE('Failed to register image volume location '
'%(uri)s.'), {'uri': uri})
if not image_registered:
LOG.warning(_LW('Registration of image volume URI %(uri)s '
'to image %(image_id)s failed.'),
{'uri': uri, 'image_id': image_meta['id']})
try:
self.delete_volume(image_volume_context, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete failed image volume '
'%(id)s.'), {'id': image_volume.id})
return False
image_volume_meta = {'glance_image_id': image_meta['id'],
'image_owner': ctx.project_id}
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
return True
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = self.db.volume_get(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error(_LE("Upload volume to image encountered an error "
"(image-id: %(image_id)s)."),
{'image_id': image_meta['id']},
resource=volume)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
if isinstance(error, exception.ImageLimitExceeded):
self.message_api.create(
context, defined_messages.IMAGE_FROM_VOLUME_OVER_QUOTA,
context.project_id,
resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info(_LI("Copy volume to image completed successfully."),
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume = self.db.volume_get(context, volume_id)
model_update = None
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume, connector)
except exception.CinderException:
err_msg = (_("Create export for volume failed."))
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume = self.db.volume_update(context,
volume_id,
model_update)
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
# Add qos_specs to connection info
typeid = volume['volume_type_id']
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that is designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.get('encryption_key_id'))
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume_ref)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Remove volume export completed successfully."),
resource=volume_ref)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
# NOTE(jdg): Some drivers tie provider info (CHAP) to the tenant;
# for those that do, allow them to return updated model info.
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Update volume model for "
"transfer operation failed."),
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume_ref)
return model_update
def _connect_device(self, conn):
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
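# The dict returned above is the attach_info consumed by _attach_volume(),
# _detach_volume() and _copy_volume_data() below. A hypothetical iSCSI
# example of its shape (values are illustrative, not produced by any
# particular driver):
# {'conn': {'driver_volume_type': 'iscsi', 'data': {...}},
#  'device': {'path': '/dev/disk/by-path/...-lun-1', 'type': 'block'},
#  'connector': <os-brick connector instance>}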
def _attach_volume(self, ctxt, volume, properties, remote=False):
status = volume['status']
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
try:
conn = rpcapi.initialize_connection(ctxt, volume, properties)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume %(vol)s."),
{'vol': volume['id']})
self.db.volume_update(ctxt, volume['id'],
{'status': status})
else:
conn = self.initialize_connection(ctxt, volume['id'], properties)
return self._connect_device(conn)
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False):
connector = attach_info['connector']
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to terminate volume connection: '
'%(err)s.') % {'err': err})
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
{'src': src_vol['name'], 'dest': dest_vol['name']})
properties = utils.brick_get_connector_properties()
dest_remote = remote in ['dest', 'both']
dest_attach_info = self._attach_volume(ctxt, dest_vol, properties,
remote=dest_remote)
try:
src_remote = remote in ['src', 'both']
src_attach_info = self._attach_volume(ctxt, src_vol, properties,
remote=src_remote)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach source volume for copy."))
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, remote=dest_remote)
# Check the backend capabilities of migration destination host.
rpcapi = volume_rpcapi.VolumeAPI()
capabilities = rpcapi.get_capabilities(ctxt, dest_vol['host'],
False)
sparse_copy_volume = bool(capabilities and
capabilities.get('sparse_copy_volume',
False))
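# Illustrative shape of the reply (hypothetical values; only the
# 'sparse_copy_volume' key matters here):
# capabilities = {'vendor_name': 'OpenSource', 'sparse_copy_volume': True, ...}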
copy_error = True
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
vol_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
try:
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, force=copy_error,
remote=dest_remote)
finally:
self._detach_volume(ctxt, src_attach_info, src_vol,
properties, force=copy_error,
remote=src_remote)
def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
skip = self._VOLUME_CLONE_SKIP_PROPERTIES | {'host'}
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
new_volume = objects.Volume(
context=ctxt,
host=host['host'],
status='creating',
attach_status='detached',
migration_status='target:%s' % volume['id'],
**new_vol_values
)
new_volume.create()
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None, allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
# TODO(thangp): Replace get_by_id with refresh when it is available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
tries = 0
while new_volume.status != 'available':
tries += 1
now = time.time()
if new_volume.status == 'error':
msg = _("failed to create new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
time.sleep(tries ** 2)
# TODO(thangp): Replace get_by_id with refresh when it is
# available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
# Copy the source volume to the destination volume
try:
attachments = volume.volume_attachment
if not attachments:
# Pre- and post-copy driver-specific actions
self.driver.before_volume_copy(ctxt, volume, new_volume,
remote='dest')
self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
self.driver.after_volume_copy(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume.id,
new_volume.id,
error=False,
volume=volume,
new_volume=new_volume)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume.id,
new_volume.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE(
"Failed to copy volume %(vol1)s to %(vol2)s"), {
'vol1': volume.id, 'vol2': new_volume.id})
self._clean_temporary_volume(ctxt, volume,
new_volume)
def _clean_temporary_volume(self, ctxt, volume, new_volume,
clean_db_only=False):
# If we're in the migrating phase, we need to clean up the
# destination volume because the source volume still remains.
if volume.migration_status == 'migrating':
try:
if clean_db_only:
# The temporary volume is not created, only DB data
# is created
new_volume.destroy()
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.delete_volume(ctxt, new_volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume.id})
else:
# If we're in the completing phase, don't delete the
# destination because we may have already deleted the
# source! But the migration_status in the database should
# be cleared so the volume can be handled after a migration failure.
try:
new_volume.migration_status = None
new_volume.save()
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume.id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume.id})
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False, volume=None, new_volume=None):
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None or new_volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
new_volume = objects.Volume.get_by_id(ctxt, new_volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume.id, 'vol2': new_volume.id})
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = volume.previous_status
if error:
LOG.info(_LI("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s"),
{'vol1': volume['id'], 'vol2': new_volume.id})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': 'error',
'status': orig_volume_status}
volume.update(updates)
volume.save()
return volume.id
volume.migration_status = 'completing'
volume.save()
# Detach the source volume (if it fails, don't fail the migration)
try:
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
self.detach_volume(ctxt, volume.id, attachment['id'])
except Exception as ex:
LOG.error(_LE("Detach migration source volume failed: %(err)s"),
{'err': ex}, resource=volume)
# Give driver (new_volume) a chance to update things as needed
# after a successful migration.
# Note this needs to go through rpc to the host of the new volume;
# the current host and driver object is for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
volume.refresh()
new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
updated_new = volume.finish_volume_migration(new_volume)
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
rpcapi.attach_volume(ctxt, volume,
attachment['instance_uuid'],
attachment['attached_host'],
attachment['mountpoint'],
'rw')
volume.update(updates)
volume.save()
# Asynchronous deletion of the source volume in the back-end (now
# pointed to by the target volume id)
try:
rpcapi.delete_volume(ctxt, updated_new)
except Exception as ex:
LOG.error(_LE('Failed to request async delete of migration source '
'vol %(vol)s: %(err)s'),
{'vol': volume.id, 'err': ex})
LOG.info(_LI("Complete-Migrate volume completed successfully."),
resource=volume)
return volume.id
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
new_type_id=None, volume=None):
"""Migrate the volume to the specified host (called on source host)."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
model_update = None
moved = False
status_update = None
if volume.status in ('retyping', 'maintenance'):
status_update = {'status': volume.previous_status}
volume.migration_status = 'migrating'
volume.save()
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume)
moved, model_update = self.driver.migrate_volume(ctxt,
volume,
host)
if moved:
updates = {'host': host['host'],
'migration_status': 'success',
'previous_status': volume.status}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume.update(updates)
volume.save()
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
if not moved:
try:
self._migrate_volume_generic(ctxt, volume, host,
new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
LOG.info(_LI("Migrate volume completed successfully."),
resource=volume)
@periodic_task.periodic_task
def _report_driver_status(self, context):
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW("Update driver status failed: %(config_group)s "
"is uninitialized."),
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# Append filter and goodness function if needed
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume_id, new_size, reservations,
volume=None):
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.status = 'error_extending'
volume.save()
project_id = volume.project_id
size_increase = (int(new_size)) - volume.size
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except Exception:
LOG.exception(_LE("Extend volume failed."),
resource=volume)
try:
self.db.volume_update(context, volume.id,
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume.id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
QUOTAS.commit(context, reservations, project_id=project_id)
volume.update({'size': int(new_size), 'status': 'available'})
volume.save()
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info(_LI("Extend volume completed successfully."),
resource=volume)
def retype(self, ctxt, volume_id, new_type_id, host,
migration_policy='never', reservations=None,
volume=None, old_reservations=None):
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
context = ctxt.elevated()
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
status_update = {'status': volume.previous_status}
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
# here? Setting the volume back to its original status
# for now.
volume.update(status_update)
volume.save()
# If old_reservations has been passed in from the API, we should
# skip quotas.
# TODO(ntpttr): These reservation checks are left in to be backwards
# compatible with Liberty and can be removed in N.
if not old_reservations:
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
# NOTE(wanghao): We don't need to reserve volumes and gigabytes
# quota for the retyping operation since they haven't changed;
# reserving just the volume_type and type gigabytes is fine.
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
volume.update(status_update)
volume.save()
msg = _("Failed to update quota usage while retyping volume.")
LOG.exception(msg, resource=volume)
raise exception.CinderException(msg)
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context, volume.volume_type_id, new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
# NOTE(jdg): Check to see if the destination host is the same
# as the current one. If it's not, don't call the driver.retype
# method, otherwise drivers that implement retype may report
# success, but it's invalid in the case of a migrate.
# We assume that drivers that support pools handle this internally,
# so we strip off the pool designation.
if (not retyped and
vol_utils.hosts_are_equivalent(self.driver.host,
host['host'])):
try:
new_type = volume_types.get_volume_type(context, new_type_id)
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
if isinstance(ret, tuple):
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume.id)
except Exception:
retyped = False
LOG.exception(_LE("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism."), volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume.id, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info(_LI("Retype volume completed successfully."),
resource=volume)
def manage_existing(self, ctxt, volume_id, ref=None):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume_id,
ref)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
# Update volume stats
pool = vol_utils.extract_host(vol_ref['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol_ref['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += vol_ref['size']
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol_ref['size'])
LOG.info(_LI("Manage existing volume completed successfully."),
resource=vol_ref)
return vol_ref['id']
def promote_replica(self, ctxt, volume_id):
"""Promote volume replica secondary to be the primary volume."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Promote volume replica failed."),
resource=volume)
try:
model_update = self.driver.promote_replica(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error promoting secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
LOG.info(_LI("Promote volume replica completed successfully."),
resource=volume)
def reenable_replication(self, ctxt, volume_id):
"""Re-enable replication of secondary volume with primary volumes."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Sync volume replica failed."),
resource=volume)
try:
model_update = self.driver.reenable_replication(ctxt, volume)
except exception.CinderException:
err_msg = (_("Synchronizing secondary volume to primary failed."))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def _update_replication_relationship_status(self, ctxt):
# Only want volumes that do not have a 'disabled' replication status
filters = {'replication_status': ['active', 'copying', 'error',
'active-stopped', 'inactive']}
volumes = self.db.volume_get_all_by_host(ctxt, self.host,
filters=filters)
for vol in volumes:
model_update = None
try:
model_update = self.driver.get_replication_status(
ctxt, vol)
if model_update:
self.db.volume_update(ctxt, vol['id'], model_update)
except Exception:
LOG.exception(_LE("Get replication status for volume failed."),
resource=vol)
def create_consistencygroup(self, context, group):
"""Creates the consistency group."""
context = context.elevated()
status = fields.ConsistencyGroupStatus.AVAILABLE
model_update = None
self._notify_about_consistencygroup_usage(
context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %s: creating"), group.name)
model_update = self.driver.create_consistencygroup(context,
group)
if model_update:
if (model_update['status'] ==
fields.ConsistencyGroupStatus.ERROR):
msg = (_('Create consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.ConsistencyGroupStatus.ERROR
group.save()
LOG.error(_LE("Consistency group %s: create failed"),
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info(_LI("Consistency group %s: created successfully"),
group.name)
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
return group
def create_consistencygroup_from_src(self, context, group,
cgsnapshot=None, source_cg=None):
"""Creates the consistency group from source.
The source can be a CG snapshot or a source CG.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = self.db.volume_get_all_by_group(context, group.id)
if cgsnapshot:
try:
# Check if cgsnapshot still exists
cgsnapshot = objects.CGSnapshot.get_by_id(
context, cgsnapshot.id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Create consistency group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': cgsnapshot.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("snapshot-%s") % cgsnapshot.id
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
if source_cg:
try:
source_cg = objects.ConsistencyGroup.get_by_id(
context, source_cg.id)
except exception.ConsistencyGroupNotFound:
LOG.error(_LE("Create consistency group "
"from source cg-%(cg)s failed: "
"ConsistencyGroupNotFound."),
{'cg': source_cg.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("cg-%s") % source_cg.id
source_vols = self.db.volume_get_all_by_group(
context, source_cg.id)
for source_vol in source_vols:
if (source_vol['status'] not in
VALID_CREATE_CG_SRC_CG_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol['id'],
'valid': VALID_CREATE_CG_SRC_CG_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if cgsnapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_cg and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_consistencygroup_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create consistency group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
return group
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
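# Illustrative example (hypothetical ids): for
# volumes = [{'id': 'v1', 'snapshot_id': 's2'}, {'id': 'v2', 'snapshot_id': 's1'}]
# and snapshots = [s1, s2], the result is [s2, s1], i.e. the snapshot
# order follows the target volume order.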
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error(_LE("Source volumes cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
try:
snapshot_id = vol.get('snapshot_id')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
def delete_consistencygroup(self, context, group):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
project_id = group.project_id
if context.project_id != group.project_id:
project_id = group.project_id
else:
project_id = context.project_id
volumes = self.db.volume_get_all_by_group(context, group.id)
for volume_ref in volumes:
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_ref['id'])
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
if volume_ref['host']:
new_host = vol_utils.extract_host(volume_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node"))
self._notify_about_consistencygroup_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, group, volumes))
if volumes_model_update:
for volume in volumes_model_update:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
for volume_ref in volumes:
# Get reservations for volume
try:
volume_id = volume_ref['id']
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume_ref['size']
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
group.destroy()
self._notify_about_consistencygroup_usage(
context, group, "delete.end", volumes)
self.publish_service_capabilities(context)
LOG.info(_LI("Delete consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
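For example (hypothetical ids), add_volumes may be passed as the
comma-separated string '<uuid-1>,<uuid-2>', and likewise for
remove_volumes.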
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': add_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'status': add_vol_ref['status'],
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(add_vol_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': remove_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref['id'],
'group_id': group.id,
'status': remove_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in (
[fields.ConsistencyGroupStatus.ERROR]):
msg = (_('Error occurred when updating consistency group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
group.status = 'available'
group.updated_at = now
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group.id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
self._notify_about_consistencygroup_usage(
context, group, "update.end")
LOG.info(_LI("Update consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
self.db.snapshot_update(context,
snap_model['id'],
snap_model)
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
# TODO(thangp): Switch over to use snapshot.update()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_update(
context, snapshot_id, {
'status': fields.SnapshotStatus.ERROR})
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'],
{'status': fields.SnapshotStatus.AVAILABLE,
'progress': '100%'})
cgsnapshot.status = 'available'
cgsnapshot.save()
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.end")
return cgsnapshot
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
project_id = cgsnapshot.project_id
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap.status = snap_model['status']
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
# TODO(thangp): Switch over to use snapshot.destroy()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
cgsnapshot.destroy()
LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id)
self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
"""Finalize migration process on backend device."""
model_update = None
model_update_default = {'_name_id': new_volume.name_id,
'provider_location':
new_volume.provider_location}
try:
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume,
volume_status)
except NotImplementedError:
# If update_migrated_volume is not implemented for the driver,
# _name_id and provider_location will be set with the values
# from new_volume.
model_update = model_update_default
if model_update:
model_update_default.update(model_update)
# Swap keys that were changed in the source so we keep their values
# in the temporary volume's DB record.
# Need to convert 'metadata' and 'admin_metadata' since they are not
# attributes of the volume object; their corresponding attributes are
# 'volume_metadata' and 'volume_admin_metadata'.
model_update_new = dict()
for key in model_update:
if key == 'metadata':
if volume.get('volume_metadata'):
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_metadata}
elif key == 'admin_metadata':
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_admin_metadata}
else:
model_update_new[key] = volume[key]
with new_volume.obj_as_admin():
new_volume.update(model_update_new)
new_volume.save()
with volume.obj_as_admin():
volume.update(model_update_default)
volume.save()
# Replication V2.1 methods
def failover_host(self, context,
secondary_backend_id=None):
"""Failover a backend to a secondary replication target.
Instructs a replication capable/configured backend to fail over
to one of its secondary replication targets. host=None is
an acceptable input, and leaves it to the driver to fail over
to the only configured target, or to choose a target on its
own. All of the host's volumes will be passed on to the driver
in order for it to determine the replicated volumes on the host,
if needed.
:param context: security context
:param secondary_backend_id: Specifies backend_id to fail over to
"""
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
volumes = objects.VolumeList.get_all_by_host(context, self.host)
exception_encountered = False
try:
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
(active_backend_id, volume_update_list) = (
self.driver.failover_host(
context,
volumes,
secondary_id=secondary_backend_id))
except exception.UnableToFailOver:
LOG.exception(_LE("Failed to perform replication failover"))
service.replication_status = (
fields.ReplicationStatus.FAILOVER_ERROR)
service.save()
exception_encountered = True
except exception.InvalidReplicationTarget:
LOG.exception(_LE("Invalid replication target specified "
"for failover"))
# Preserve the replication_status
if secondary_backend_id == "default":
service.replication_status = (
fields.ReplicationStatus.FAILED_OVER)
else:
service.replication_status = fields.ReplicationStatus.ENABLED
service.save()
exception_encountered = True
except exception.VolumeDriverException:
# NOTE(jdg): Drivers need to be aware that if they fail during
# a failover sequence, we expect them to clean up and make
# sure the driver state is such that the original backend is
# still set as primary, as per driver memory.
LOG.error(_LE("Driver reported error during "
"replication failover."))
service.status = 'error'
service.save()
exception_encountered = True
if exception_encountered:
LOG.error(
_LE("Error encountered during failover on host: "
"%(host)s invalid target ID %(backend_id)s"),
{'host': self.host, 'backend_id':
secondary_backend_id})
return
if secondary_backend_id == "default":
service.replication_status = fields.ReplicationStatus.ENABLED
service.active_backend_id = ""
if service.frozen:
service.disabled = True
service.disabled_reason = "frozen"
else:
service.disabled = False
service.disabled_reason = ""
service.save()
else:
service.replication_status = fields.ReplicationStatus.FAILED_OVER
service.active_backend_id = active_backend_id
service.disabled = True
service.disabled_reason = "failed-over"
service.save()
for update in volume_update_list:
# Response must include an id key: {volume_id: <cinder-uuid>}
if not update.get('volume_id'):
raise exception.UnableToFailOver(
reason=_("Update list, doesn't include volume_id"))
# Key things to consider (attaching failed-over volumes):
# provider_location
# provider_auth
# provider_id
# replication_status
vobj = objects.Volume.get_by_id(context, update['volume_id'])
vobj.update(update.get('updates', {}))
vobj.save()
LOG.info(_LI("Failed over to replication target successfully."))
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really
# need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning(_LW('Error encountered on Cinder backend during '
'freeze operation; the service is frozen, but the '
'notification to the driver has failed.'))
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info(_LI("Set backend status to frozen successfully."))
return True
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters, if this call
# to the backend fails, we're stuck and can't re-enable
LOG.error(_LE('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.'))
return False
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info(_LI("Thawed backend successfully."))
return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
msg = _LE("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.")
LOG.exception(msg, {'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
def get_backup_device(self, ctxt, backup):
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
return backup_device_dict
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
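The dictionary returned by get_backup_device() above is what the backup service consumes. A minimal sketch of that consumption, assuming manager is an instance of the volume manager shown here, ctxt and backup come from the caller, and the returned backup_device object exposes an id attribute; the helper name below is made up for illustration only.
def describe_backup_device(manager, ctxt, backup):
    # Unpack the dict built by get_backup_device(); the keys are exactly the ones above.
    info = manager.get_backup_device(ctxt, backup)
    source_kind = 'snapshot' if info['is_snapshot'] else 'volume'
    return ('backing up from %s %s (secure file operations: %s)'
            % (source_kind, info['backup_device'].id, info['secure_enabled']))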
|
__reduce__
|
Provide pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
|
# -*- coding: utf-8 -*-
from datetime import date
import json
from operator import itemgetter
import os
import warnings
from django.core.urlresolvers import NoReverseMatch
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db import models
from django.db.models import signals, Model
from django.db.models.base import model_unpickle, ModelBase
from django.db.models.query_utils import DeferredAttribute
from django.utils import six, timezone
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils.six.moves import filter
from django.utils.translation import ugettext_lazy as _
from cms.exceptions import DontUsePageAttributeWarning
from cms.models.placeholdermodel import Placeholder
from cms.plugin_rendering import PluginContext, render_plugin
from cms.utils import get_cms_setting
from cms.utils.helpers import reversion_register
from cms.utils.urlutils import admin_reverse
from treebeard.mp_tree import MP_Node
class BoundRenderMeta(object):
def __init__(self, meta):
self.index = 0
self.total = 1
self.text_enabled = getattr(meta, 'text_enabled', False)
class PluginModelBase(ModelBase):
"""
Metaclass for all CMSPlugin subclasses. This class should not be used for
any other type of models.
"""
def __new__(cls, name, bases, attrs):
# remove RenderMeta from the plugin class
attr_meta = attrs.pop('RenderMeta', None)
# create a new class (using the super-metaclass)
new_class = super(PluginModelBase, cls).__new__(cls, name, bases, attrs)
# if there is a RenderMeta in attrs, use this one
# else try to use the one from the superclass (if present)
meta = attr_meta or getattr(new_class, '_render_meta', None)
treebeard_view_fields = (f for f in new_class._meta.fields
if f.name in ('depth', 'numchild', 'path'))
for field in treebeard_view_fields:
field.editable = False
# set a new BoundRenderMeta to prevent leaking of state
new_class._render_meta = BoundRenderMeta(meta)
return new_class
@python_2_unicode_compatible
class CMSPlugin(six.with_metaclass(PluginModelBase, MP_Node)):
'''
The base class for a CMS plugin model. When defining a new custom plugin, you should
store plugin-instance specific information on a subclass of this class.
    An example for this would be to store the number of pictures to display in a gallery.
Two restrictions apply when subclassing this to use in your own models:
1. Subclasses of CMSPlugin *cannot be further subclassed*
2. Subclasses of CMSPlugin cannot define a "text" field.
'''
placeholder = models.ForeignKey(Placeholder, editable=False, null=True)
parent = models.ForeignKey('self', blank=True, null=True, editable=False)
position = models.PositiveSmallIntegerField(_("position"), blank=True, null=True, editable=False)
language = models.CharField(_("language"), max_length=15, blank=False, db_index=True, editable=False)
plugin_type = models.CharField(_("plugin_name"), max_length=50, db_index=True, editable=False)
creation_date = models.DateTimeField(_("creation date"), editable=False, default=timezone.now)
changed_date = models.DateTimeField(auto_now=True)
child_plugin_instances = None
translatable_content_excluded_fields = []
class Meta:
app_label = 'cms'
class RenderMeta:
index = 0
total = 1
text_enabled = False
# MASKED: __reduce__ function (lines 94-111)
def __str__(self):
return force_text(self.pk)
def get_plugin_name(self):
from cms.plugin_pool import plugin_pool
return plugin_pool.get_plugin(self.plugin_type).name
def get_short_description(self):
instance = self.get_plugin_instance()[0]
if instance is not None:
return force_text(instance)
return _("<Empty>")
def get_plugin_class(self):
from cms.plugin_pool import plugin_pool
return plugin_pool.get_plugin(self.plugin_type)
def get_plugin_class_instance(self, admin=None):
plugin_class = self.get_plugin_class()
# needed so we have the same signature as the original ModelAdmin
return plugin_class(plugin_class.model, admin)
def get_plugin_instance(self, admin=None):
'''
        Given a plugin instance (usually as a CMSPlugin), this method
        returns a tuple containing:
        instance - the instance AS THE APPROPRIATE SUBCLASS OF
                   CMSPlugin and not necessarily just 'self', which is
                   often just a CMSPlugin,
        plugin - the associated plugin class instance (a subclass
                 of CMSPluginBase)
'''
plugin = self.get_plugin_class_instance(admin)
if hasattr(self, "_inst"):
return self._inst, plugin
if plugin.model != self.__class__: # and self.__class__ == CMSPlugin:
# (if self is actually a subclass, getattr below would break)
try:
instance = plugin.model.objects.get(cmsplugin_ptr=self)
instance._render_meta = self._render_meta
except (AttributeError, ObjectDoesNotExist):
instance = None
else:
instance = self
self._inst = instance
return self._inst, plugin
def render_plugin(self, context=None, placeholder=None, admin=False, processors=None):
instance, plugin = self.get_plugin_instance()
if instance and not (admin and not plugin.admin_preview):
if not placeholder or not isinstance(placeholder, Placeholder):
placeholder = instance.placeholder
placeholder_slot = placeholder.slot
current_app = context.current_app if context else None
context = PluginContext(context, instance, placeholder, current_app=current_app)
context = plugin.render(context, instance, placeholder_slot)
request = context.get('request', None)
page = None
if request:
page = request.current_page
context['allowed_child_classes'] = plugin.get_child_classes(placeholder_slot, page)
if plugin.render_plugin:
template = plugin._get_render_template(context, instance, placeholder)
if not template:
raise ValidationError("plugin has no render_template: %s" % plugin.__class__)
else:
template = None
return render_plugin(context, instance, placeholder, template, processors, context.current_app)
else:
from cms.middleware.toolbar import toolbar_plugin_processor
if processors and toolbar_plugin_processor in processors:
if not placeholder:
placeholder = self.placeholder
current_app = context.current_app if context else None
context = PluginContext(context, self, placeholder, current_app=current_app)
template = None
return render_plugin(context, self, placeholder, template, processors, context.current_app)
return ""
def get_media_path(self, filename):
pages = self.placeholder.page_set.all()
if pages.count():
return pages[0].get_media_path(filename)
else: # django 1.0.2 compatibility
today = date.today()
return os.path.join(get_cms_setting('PAGE_MEDIA_PATH'),
str(today.year), str(today.month), str(today.day), filename)
@property
def page(self):
warnings.warn(
"Don't use the page attribute on CMSPlugins! CMSPlugins are not "
"guaranteed to have a page associated with them!",
DontUsePageAttributeWarning)
return self.placeholder.page if self.placeholder_id else None
def get_instance_icon_src(self):
"""
Get src URL for instance's icon
"""
instance, plugin = self.get_plugin_instance()
return plugin.icon_src(instance) if instance else u''
def get_instance_icon_alt(self):
"""
Get alt text for instance's icon
"""
instance, plugin = self.get_plugin_instance()
return force_text(plugin.icon_alt(instance)) if instance else u''
def save(self, no_signals=False, *args, **kwargs):
if not self.depth:
if self.parent_id or self.parent:
self.parent.add_child(instance=self)
else:
if not self.position and not self.position == 0:
                    self.position = CMSPlugin.objects.filter(parent__isnull=True,
placeholder_id=self.placeholder_id).count()
self.add_root(instance=self)
return
super(CMSPlugin, self).save()
def reload(self):
return CMSPlugin.objects.get(pk=self.pk)
def move(self, target, pos=None):
super(CMSPlugin, self).move(target, pos)
return self.reload()
def set_base_attr(self, plugin):
for attr in ['parent_id', 'placeholder', 'language', 'plugin_type', 'creation_date', 'depth', 'path',
'numchild', 'pk', 'position']:
setattr(plugin, attr, getattr(self, attr))
def copy_plugin(self, target_placeholder, target_language, parent_cache, no_signals=False):
"""
Copy this plugin and return the new plugin.
The logic of this method is the following:
        1. get a new generic plugin instance
        2. assign the position in the plugin tree
        3. save it to let mptt/treebeard calculate the tree attributes
        4. then get a copy of the current plugin instance
        5. assign to it the id of the generic plugin instance above;
           this will effectively change the generic plugin created above
           into a concrete one
        6. copy the tree related attributes from the generic plugin to
           the concrete one
        7. save the concrete plugin
        8. trigger the copy relations
        9. return the generic plugin instance
This copy logic is required because we don't know what the fields of
the real plugin are. By getting another instance of it at step 4 and
then overwriting its ID at step 5, the ORM will copy the custom
fields for us.
"""
try:
plugin_instance, cls = self.get_plugin_instance()
except KeyError: # plugin type not found anymore
return
# set up some basic attributes on the new_plugin
new_plugin = CMSPlugin()
new_plugin.placeholder = target_placeholder
# we assign a parent to our new plugin
parent_cache[self.pk] = new_plugin
if self.parent:
parent = parent_cache[self.parent_id]
parent = CMSPlugin.objects.get(pk=parent.pk)
new_plugin.parent_id = parent.pk
new_plugin.parent = parent
new_plugin.language = target_language
new_plugin.plugin_type = self.plugin_type
if no_signals:
from cms.signals import pre_save_plugins
signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin')
signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin)
new_plugin._no_reorder = True
new_plugin.save()
if plugin_instance:
# get a new instance so references do not get mixed up
plugin_instance = plugin_instance.__class__.objects.get(pk=plugin_instance.pk)
plugin_instance.pk = new_plugin.pk
plugin_instance.id = new_plugin.pk
plugin_instance.placeholder = target_placeholder
plugin_instance.cmsplugin_ptr = new_plugin
plugin_instance.language = target_language
plugin_instance.parent = new_plugin.parent
plugin_instance.depth = new_plugin.depth
plugin_instance.path = new_plugin.path
plugin_instance.numchild = new_plugin.numchild
plugin_instance._no_reorder = True
plugin_instance.save()
old_instance = plugin_instance.__class__.objects.get(pk=self.pk)
plugin_instance.copy_relations(old_instance)
if no_signals:
signals.pre_save.connect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin')
return new_plugin
def post_copy(self, old_instance, new_old_ziplist):
"""
Handle more advanced cases (eg Text Plugins) after the original is
copied
"""
pass
def copy_relations(self, old_instance):
"""
Handle copying of any relations attached to this plugin. Custom plugins
have to do this themselves!
"""
pass
def has_change_permission(self, request):
page = self.placeholder.page if self.placeholder else None
if page:
return page.has_change_permission(request)
elif self.placeholder:
return self.placeholder.has_change_permission(request)
return False
def get_position_in_placeholder(self):
"""
1 based position!
"""
return self.position + 1
def get_breadcrumb(self):
from cms.models import Page
model = self.placeholder._get_attached_model() or Page
breadcrumb = []
if not self.parent_id:
try:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (model._meta.app_label, model._meta.model_name),
args=[self.pk]))
except NoReverseMatch:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (Page._meta.app_label, Page._meta.model_name),
args=[self.pk]))
breadcrumb.append({'title': force_text(self.get_plugin_name()), 'url': url})
return breadcrumb
for parent in self.get_ancestors().reverse():
try:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (model._meta.app_label, model._meta.model_name),
args=[parent.pk]))
except NoReverseMatch:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (Page._meta.app_label, Page._meta.model_name),
args=[parent.pk]))
breadcrumb.append({'title': force_text(parent.get_plugin_name()), 'url': url})
return breadcrumb
def get_breadcrumb_json(self):
result = json.dumps(self.get_breadcrumb())
result = mark_safe(result)
return result
def num_children(self):
return self.numchild
def notify_on_autoadd(self, request, conf):
"""
Method called when we auto add this plugin via default_plugins in
CMS_PLACEHOLDER_CONF.
Some specific plugins may have some special stuff to do when they are
auto added.
"""
pass
def notify_on_autoadd_children(self, request, conf, children):
"""
Method called when we auto add children to this plugin via
default_plugins/<plugin>/children in CMS_PLACEHOLDER_CONF.
        Some plugins may need to do something special when children are added
        to them, e.g. TextPlugin must update its content with the HTML tags
        needed to show its children in the WYSIWYG editor.
"""
pass
def get_translatable_content(self):
"""
Returns {field_name: field_contents} for translatable fields, where
field_contents > ''
"""
fields = (f for f in self._meta.fields
if isinstance(f, (models.CharField, models.TextField)) and
f.editable and not f.choices and
f.name not in self.translatable_content_excluded_fields)
return dict(filter(itemgetter(1),
((f.name, getattr(self, f.name)) for f in fields)))
def set_translatable_content(self, fields):
for field, value in fields.items():
setattr(self, field, value)
self.save()
return all(getattr(self, field) == value
for field, value in fields.items())
def delete(self, no_mp=False, *args, **kwargs):
if no_mp:
Model.delete(self, *args, **kwargs)
else:
super(CMSPlugin, self).delete(*args, **kwargs)
@property
def add_url(self):
"""
Returns a custom url to add plugin instances
"""
return None
@property
def edit_url(self):
"""
Returns a custom url to edit plugin instances
"""
return None
@property
def move_url(self):
"""
Returns a custom url to move plugin instances
"""
return None
@property
def delete_url(self):
"""
Returns a custom url to delete plugin instances
"""
return None
@property
def copy_url(self):
"""
Returns a custom url to copy plugin instances
"""
return None
reversion_register(CMSPlugin)
def get_plugin_media_path(instance, filename):
"""
    Django 1.7 requires that unbound functions used in field definitions be defined outside the parent class
    (see https://docs.djangoproject.com/en/dev/topics/migrations/#serializing-values)
    This function is used within a field definition:
    file = models.FileField(_("file"), upload_to=get_plugin_media_path)
    and it invokes the bound method on the given instance at runtime
"""
return instance.get_media_path(filename)
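As a usage note for the get_translatable_content()/set_translatable_content() pair defined on CMSPlugin above, the intended round trip looks roughly like this; plugin is assumed to be an instance of some CMSPlugin subclass with editable CharField/TextField content, and translate() is a hypothetical stand-in for whatever translation backend is used.
contents = plugin.get_translatable_content()      # e.g. {'title': 'Hello', 'body': '...'}
translated = dict((name, translate(value)) for name, value in contents.items())
all_applied = plugin.set_translatable_content(translated)  # saves and reports whether every field stuck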
|
def __reduce__(self):
"""
Provide pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
data = self.__dict__
# The obvious thing to do here is to invoke super().__reduce__()
# for the non-deferred case. Don't do that.
        # On Python 2.4, there is something weird with __reduce__,
# and as a result, the super call will cause an infinite recursion.
# See #10547 and #12121.
deferred_fields = [f for f in self._meta.fields
if isinstance(self.__class__.__dict__.get(f.attname),
DeferredAttribute)]
model = self._meta.proxy_for_model
return (model_unpickle, (model, deferred_fields), data)
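To see what the custom __reduce__ buys, here is a small pickling round trip. GalleryPlugin is a hypothetical concrete CMSPlugin subclass used purely for illustration; .only() returns an instance of a dynamically created deferred class, which is exactly the case handled above via model_unpickle.
import pickle

# Hypothetical model; any concrete CMSPlugin subclass would behave the same way.
plugin = GalleryPlugin.objects.only('pk').get(pk=1)   # deferred field loading
blob = pickle.dumps(plugin)                           # routed through __reduce__ above
restored = pickle.loads(blob)                         # rebuilt by model_unpickle(model, deferred_fields)
assert restored.pk == plugin.pk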
| 94 | 111 |
# -*- coding: utf-8 -*-
from datetime import date
import json
from operator import itemgetter
import os
import warnings
from django.core.urlresolvers import NoReverseMatch
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db import models
from django.db.models import signals, Model
from django.db.models.base import model_unpickle, ModelBase
from django.db.models.query_utils import DeferredAttribute
from django.utils import six, timezone
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils.six.moves import filter
from django.utils.translation import ugettext_lazy as _
from cms.exceptions import DontUsePageAttributeWarning
from cms.models.placeholdermodel import Placeholder
from cms.plugin_rendering import PluginContext, render_plugin
from cms.utils import get_cms_setting
from cms.utils.helpers import reversion_register
from cms.utils.urlutils import admin_reverse
from treebeard.mp_tree import MP_Node
class BoundRenderMeta(object):
def __init__(self, meta):
self.index = 0
self.total = 1
self.text_enabled = getattr(meta, 'text_enabled', False)
class PluginModelBase(ModelBase):
"""
Metaclass for all CMSPlugin subclasses. This class should not be used for
any other type of models.
"""
def __new__(cls, name, bases, attrs):
# remove RenderMeta from the plugin class
attr_meta = attrs.pop('RenderMeta', None)
# create a new class (using the super-metaclass)
new_class = super(PluginModelBase, cls).__new__(cls, name, bases, attrs)
# if there is a RenderMeta in attrs, use this one
# else try to use the one from the superclass (if present)
meta = attr_meta or getattr(new_class, '_render_meta', None)
treebeard_view_fields = (f for f in new_class._meta.fields
if f.name in ('depth', 'numchild', 'path'))
for field in treebeard_view_fields:
field.editable = False
# set a new BoundRenderMeta to prevent leaking of state
new_class._render_meta = BoundRenderMeta(meta)
return new_class
@python_2_unicode_compatible
class CMSPlugin(six.with_metaclass(PluginModelBase, MP_Node)):
'''
The base class for a CMS plugin model. When defining a new custom plugin, you should
store plugin-instance specific information on a subclass of this class.
    An example for this would be to store the number of pictures to display in a gallery.
Two restrictions apply when subclassing this to use in your own models:
1. Subclasses of CMSPlugin *cannot be further subclassed*
2. Subclasses of CMSPlugin cannot define a "text" field.
'''
placeholder = models.ForeignKey(Placeholder, editable=False, null=True)
parent = models.ForeignKey('self', blank=True, null=True, editable=False)
position = models.PositiveSmallIntegerField(_("position"), blank=True, null=True, editable=False)
language = models.CharField(_("language"), max_length=15, blank=False, db_index=True, editable=False)
plugin_type = models.CharField(_("plugin_name"), max_length=50, db_index=True, editable=False)
creation_date = models.DateTimeField(_("creation date"), editable=False, default=timezone.now)
changed_date = models.DateTimeField(auto_now=True)
child_plugin_instances = None
translatable_content_excluded_fields = []
class Meta:
app_label = 'cms'
class RenderMeta:
index = 0
total = 1
text_enabled = False
def __reduce__(self):
"""
Provide pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
data = self.__dict__
# The obvious thing to do here is to invoke super().__reduce__()
# for the non-deferred case. Don't do that.
        # On Python 2.4, there is something weird with __reduce__,
# and as a result, the super call will cause an infinite recursion.
# See #10547 and #12121.
deferred_fields = [f for f in self._meta.fields
if isinstance(self.__class__.__dict__.get(f.attname),
DeferredAttribute)]
model = self._meta.proxy_for_model
return (model_unpickle, (model, deferred_fields), data)
def __str__(self):
return force_text(self.pk)
def get_plugin_name(self):
from cms.plugin_pool import plugin_pool
return plugin_pool.get_plugin(self.plugin_type).name
def get_short_description(self):
instance = self.get_plugin_instance()[0]
if instance is not None:
return force_text(instance)
return _("<Empty>")
def get_plugin_class(self):
from cms.plugin_pool import plugin_pool
return plugin_pool.get_plugin(self.plugin_type)
def get_plugin_class_instance(self, admin=None):
plugin_class = self.get_plugin_class()
# needed so we have the same signature as the original ModelAdmin
return plugin_class(plugin_class.model, admin)
def get_plugin_instance(self, admin=None):
'''
        Given a plugin instance (usually as a CMSPlugin), this method
        returns a tuple containing:
        instance - the instance AS THE APPROPRIATE SUBCLASS OF
                   CMSPlugin and not necessarily just 'self', which is
                   often just a CMSPlugin,
        plugin - the associated plugin class instance (a subclass
                 of CMSPluginBase)
'''
plugin = self.get_plugin_class_instance(admin)
if hasattr(self, "_inst"):
return self._inst, plugin
if plugin.model != self.__class__: # and self.__class__ == CMSPlugin:
# (if self is actually a subclass, getattr below would break)
try:
instance = plugin.model.objects.get(cmsplugin_ptr=self)
instance._render_meta = self._render_meta
except (AttributeError, ObjectDoesNotExist):
instance = None
else:
instance = self
self._inst = instance
return self._inst, plugin
def render_plugin(self, context=None, placeholder=None, admin=False, processors=None):
instance, plugin = self.get_plugin_instance()
if instance and not (admin and not plugin.admin_preview):
if not placeholder or not isinstance(placeholder, Placeholder):
placeholder = instance.placeholder
placeholder_slot = placeholder.slot
current_app = context.current_app if context else None
context = PluginContext(context, instance, placeholder, current_app=current_app)
context = plugin.render(context, instance, placeholder_slot)
request = context.get('request', None)
page = None
if request:
page = request.current_page
context['allowed_child_classes'] = plugin.get_child_classes(placeholder_slot, page)
if plugin.render_plugin:
template = plugin._get_render_template(context, instance, placeholder)
if not template:
raise ValidationError("plugin has no render_template: %s" % plugin.__class__)
else:
template = None
return render_plugin(context, instance, placeholder, template, processors, context.current_app)
else:
from cms.middleware.toolbar import toolbar_plugin_processor
if processors and toolbar_plugin_processor in processors:
if not placeholder:
placeholder = self.placeholder
current_app = context.current_app if context else None
context = PluginContext(context, self, placeholder, current_app=current_app)
template = None
return render_plugin(context, self, placeholder, template, processors, context.current_app)
return ""
def get_media_path(self, filename):
pages = self.placeholder.page_set.all()
if pages.count():
return pages[0].get_media_path(filename)
else: # django 1.0.2 compatibility
today = date.today()
return os.path.join(get_cms_setting('PAGE_MEDIA_PATH'),
str(today.year), str(today.month), str(today.day), filename)
@property
def page(self):
warnings.warn(
"Don't use the page attribute on CMSPlugins! CMSPlugins are not "
"guaranteed to have a page associated with them!",
DontUsePageAttributeWarning)
return self.placeholder.page if self.placeholder_id else None
def get_instance_icon_src(self):
"""
Get src URL for instance's icon
"""
instance, plugin = self.get_plugin_instance()
return plugin.icon_src(instance) if instance else u''
def get_instance_icon_alt(self):
"""
Get alt text for instance's icon
"""
instance, plugin = self.get_plugin_instance()
return force_text(plugin.icon_alt(instance)) if instance else u''
def save(self, no_signals=False, *args, **kwargs):
if not self.depth:
if self.parent_id or self.parent:
self.parent.add_child(instance=self)
else:
if not self.position and not self.position == 0:
                    self.position = CMSPlugin.objects.filter(parent__isnull=True,
placeholder_id=self.placeholder_id).count()
self.add_root(instance=self)
return
super(CMSPlugin, self).save()
def reload(self):
return CMSPlugin.objects.get(pk=self.pk)
def move(self, target, pos=None):
super(CMSPlugin, self).move(target, pos)
return self.reload()
def set_base_attr(self, plugin):
for attr in ['parent_id', 'placeholder', 'language', 'plugin_type', 'creation_date', 'depth', 'path',
'numchild', 'pk', 'position']:
setattr(plugin, attr, getattr(self, attr))
def copy_plugin(self, target_placeholder, target_language, parent_cache, no_signals=False):
"""
Copy this plugin and return the new plugin.
The logic of this method is the following:
        1. get a new generic plugin instance
        2. assign the position in the plugin tree
        3. save it to let mptt/treebeard calculate the tree attributes
        4. then get a copy of the current plugin instance
        5. assign to it the id of the generic plugin instance above;
           this will effectively change the generic plugin created above
           into a concrete one
        6. copy the tree related attributes from the generic plugin to
           the concrete one
        7. save the concrete plugin
        8. trigger the copy relations
        9. return the generic plugin instance
This copy logic is required because we don't know what the fields of
the real plugin are. By getting another instance of it at step 4 and
then overwriting its ID at step 5, the ORM will copy the custom
fields for us.
"""
try:
plugin_instance, cls = self.get_plugin_instance()
except KeyError: # plugin type not found anymore
return
# set up some basic attributes on the new_plugin
new_plugin = CMSPlugin()
new_plugin.placeholder = target_placeholder
# we assign a parent to our new plugin
parent_cache[self.pk] = new_plugin
if self.parent:
parent = parent_cache[self.parent_id]
parent = CMSPlugin.objects.get(pk=parent.pk)
new_plugin.parent_id = parent.pk
new_plugin.parent = parent
new_plugin.language = target_language
new_plugin.plugin_type = self.plugin_type
if no_signals:
from cms.signals import pre_save_plugins
signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin')
signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin)
new_plugin._no_reorder = True
new_plugin.save()
if plugin_instance:
# get a new instance so references do not get mixed up
plugin_instance = plugin_instance.__class__.objects.get(pk=plugin_instance.pk)
plugin_instance.pk = new_plugin.pk
plugin_instance.id = new_plugin.pk
plugin_instance.placeholder = target_placeholder
plugin_instance.cmsplugin_ptr = new_plugin
plugin_instance.language = target_language
plugin_instance.parent = new_plugin.parent
plugin_instance.depth = new_plugin.depth
plugin_instance.path = new_plugin.path
plugin_instance.numchild = new_plugin.numchild
plugin_instance._no_reorder = True
plugin_instance.save()
old_instance = plugin_instance.__class__.objects.get(pk=self.pk)
plugin_instance.copy_relations(old_instance)
if no_signals:
signals.pre_save.connect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin')
return new_plugin
def post_copy(self, old_instance, new_old_ziplist):
"""
Handle more advanced cases (eg Text Plugins) after the original is
copied
"""
pass
def copy_relations(self, old_instance):
"""
Handle copying of any relations attached to this plugin. Custom plugins
have to do this themselves!
"""
pass
def has_change_permission(self, request):
page = self.placeholder.page if self.placeholder else None
if page:
return page.has_change_permission(request)
elif self.placeholder:
return self.placeholder.has_change_permission(request)
return False
def get_position_in_placeholder(self):
"""
1 based position!
"""
return self.position + 1
def get_breadcrumb(self):
from cms.models import Page
model = self.placeholder._get_attached_model() or Page
breadcrumb = []
if not self.parent_id:
try:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (model._meta.app_label, model._meta.model_name),
args=[self.pk]))
except NoReverseMatch:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (Page._meta.app_label, Page._meta.model_name),
args=[self.pk]))
breadcrumb.append({'title': force_text(self.get_plugin_name()), 'url': url})
return breadcrumb
for parent in self.get_ancestors().reverse():
try:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (model._meta.app_label, model._meta.model_name),
args=[parent.pk]))
except NoReverseMatch:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (Page._meta.app_label, Page._meta.model_name),
args=[parent.pk]))
breadcrumb.append({'title': force_text(parent.get_plugin_name()), 'url': url})
return breadcrumb
def get_breadcrumb_json(self):
result = json.dumps(self.get_breadcrumb())
result = mark_safe(result)
return result
def num_children(self):
return self.numchild
def notify_on_autoadd(self, request, conf):
"""
Method called when we auto add this plugin via default_plugins in
CMS_PLACEHOLDER_CONF.
Some specific plugins may have some special stuff to do when they are
auto added.
"""
pass
def notify_on_autoadd_children(self, request, conf, children):
"""
Method called when we auto add children to this plugin via
default_plugins/<plugin>/children in CMS_PLACEHOLDER_CONF.
        Some plugins may need to do something special when children are added
        to them, e.g. TextPlugin must update its content with the HTML tags
        needed to show its children in the WYSIWYG editor.
"""
pass
def get_translatable_content(self):
"""
Returns {field_name: field_contents} for translatable fields, where
field_contents > ''
"""
fields = (f for f in self._meta.fields
if isinstance(f, (models.CharField, models.TextField)) and
f.editable and not f.choices and
f.name not in self.translatable_content_excluded_fields)
return dict(filter(itemgetter(1),
((f.name, getattr(self, f.name)) for f in fields)))
def set_translatable_content(self, fields):
for field, value in fields.items():
setattr(self, field, value)
self.save()
return all(getattr(self, field) == value
for field, value in fields.items())
def delete(self, no_mp=False, *args, **kwargs):
if no_mp:
Model.delete(self, *args, **kwargs)
else:
super(CMSPlugin, self).delete(*args, **kwargs)
@property
def add_url(self):
"""
Returns a custom url to add plugin instances
"""
return None
@property
def edit_url(self):
"""
Returns a custom url to edit plugin instances
"""
return None
@property
def move_url(self):
"""
Returns a custom url to move plugin instances
"""
return None
@property
def delete_url(self):
"""
Returns a custom url to delete plugin instances
"""
return None
@property
def copy_url(self):
"""
Returns a custom url to copy plugin instances
"""
return None
reversion_register(CMSPlugin)
def get_plugin_media_path(instance, filename):
"""
    Django 1.7 requires that unbound functions used in field definitions be defined outside the parent class
    (see https://docs.djangoproject.com/en/dev/topics/migrations/#serializing-values)
    This function is used within a field definition:
    file = models.FileField(_("file"), upload_to=get_plugin_media_path)
    and it invokes the bound method on the given instance at runtime
"""
return instance.get_media_path(filename)
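copy_plugin() above is normally driven over a whole placeholder. A sketch of such a driver, assuming source_placeholder, target_placeholder and a language code are given; the function name below is made up for illustration.
def copy_placeholder_plugins(source_placeholder, target_placeholder, lang):
    parent_cache = {}
    new_plugins = []
    old_plugins = CMSPlugin.objects.filter(placeholder=source_placeholder,
                                           language=lang).order_by('path')
    for old_plugin in old_plugins:
        # copy_plugin() stores each copy in parent_cache[old_plugin.pk], so children
        # copied later can attach themselves to the freshly created parent.
        new_plugin = old_plugin.copy_plugin(target_placeholder, lang, parent_cache)
        if new_plugin is not None:  # plugin type may no longer be installed
            new_plugins.append(new_plugin)
    return new_plugins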
|
generate_mesh
|
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
@raise ServiceError: if an error occurs while generating the mesh
|
# -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
"""
__author__ = "caoweiquan322, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2015 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process, Manager
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
import fnmatch
import h5py
# This class represents parameters used in the meshing process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
'infile outfile maxh minh fineness grading usetolerance tolerance')
# This class represents parameters used in the simulation process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
'min_wave_frequencies max_wave_frequencies wave_directions ' +
'max_wave_direction min_wave_directions floating_bodies ' +
'indiq_solver ires tol_gmres max_iterations save_potential ' +
'green_tabulation_numx green_tabulation_numz ' +
'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
'use_higher_order num_panel_higher_order b_spline_order ' +
'use_dipoles_implementation thin_panels compute_drift_forces ' +
'compute_yaw_moment remove_irregular_frequencies')
# This class represents a floating body used in the SimulationParameters.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
'force_in_z_direction moment_cdg_force_in_x_direction ' +
'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
'additional_info_lines')
# This class represents parameters used in the post-processing.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
'kochin_function free_surface_elevation')
# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'
# The pre-defined stdout log file name.
_LOG_FILE_NAME = 'log.txt'
# The logger object for logging.
_LOGGER = logging.getLogger(__name__)
class ServiceError(Exception):
'''
This exception indicates a service error.
It will be raised by methods of this module.
'''
pass
# MASKED: generate_mesh function (lines 81-132)
def simulate(simulation_dir, params):
'''
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
'''
signature = __name__ + '.simulate()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', SimulationParameters, False)
helper.check_not_none_nor_empty(params.rho, 'params.rho')
helper.check_not_none_nor_empty(params.g, 'params.g')
helper.check_not_none_nor_empty(params.depth, 'params.depth')
helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
helper.check_not_none_nor_empty(params.ires, 'params.ires')
helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
if params.floating_bodies is not None:
for body in params.floating_bodies:
helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
helper.check_not_none_nor_empty(body.points, 'body.points')
helper.check_not_none_nor_empty(body.panels, 'body.panels')
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
try:
# Write the hdf5 inputs according to given parameters
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_calculations(params, hdf5_data)
# Launch preProcessor and Solver
# A prepared 'results' folder is necessary for the Nemoh software suite
os.mkdir(os.path.join(simulation_dir, 'results'))
simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
'B_SPLINE_ORDER': int(params.b_spline_order),
'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
}
_LOGGER.debug('Start preProcessor function.')
run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path)
_LOGGER.debug('End preProcessor function.')
_LOGGER.debug('Start solver function.')
output = run_thread(solver.solve, (custom_config,), None)
with open(simulation_log_path, 'a') as log_file:
log_file.write(output)
_LOGGER.debug('End solver function.')
with open(simulation_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
def postprocess(simulation_dir, params):
'''
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
of valid value
    @raise ServiceError: if an error occurs while launching the post-processing
'''
signature = __name__ + '.postprocess()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', PostprocessingParameters, False)
helper.check_type_value(params.irf, 'params.irf', list, False)
for irf_item in params.irf:
helper.check_not_none_nor_empty(irf_item, 'irf_item')
helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
for kochin_function_item in params.kochin_function:
helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
for elevation_item in params.free_surface_elevation:
helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
try:
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_postprocessing_section(params, hdf5_data)
# Launch postProcessor
postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : 328,
'GREEN_TABULATION_NUMZ' : 46,
'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
'USE_ODE_INFLUENCE_COEFFICIENTS': False,
'USE_HIGHER_ORDER' : False,
'NUM_PANEL_HIGHER_ORDER' : 1,
'B_SPLINE_ORDER': 1,
'USE_DIPOLES_IMPLEMENTATION': False,
'THIN_PANELS': [-1],
'COMPUTE_DRIFT_FORCES' : False,
'COMPUTE_YAW_MOMENT': False,
'REMOVE_IRREGULAR_FREQUENCIES' : False
}
_LOGGER.debug('Start postProcessor function.')
run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path)
_LOGGER.debug('End postProcessor in subprocess.')
with open(postprocessing_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
def visualize(simulation_dir):
'''
Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
    @raise ServiceError: if an error occurs while launching ParaView
'''
signature = __name__ + '.visualize()'
helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
try:
# Filter files to be opened in ParaView
files = []
for f in os.listdir(os.path.join(simulation_dir, 'results')):
for ext in VISUALIZATION_FILE_EXTENSIONS:
if fnmatch.fnmatch(f, '*.' + ext):
files.append(os.path.join(simulation_dir, 'results', f))
# Check if there's tec/vtk/stl file to visualize
if len(files) == 0:
raise ServiceError('There is no accepted file to visualize.')
_LOGGER.debug('List of files to load:')
_LOGGER.debug(str(files))
# Prepare script to run by ParaView
paraview_script = os.path.join(os.path.join(simulation_dir, 'results'), 'load_data.py')
prepare_paraview_script(paraview_script, files)
# Launch ParaView without waiting for the ParaView to exit
_LOGGER.debug('Start launching ParaView in subprocess.')
subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script + ''])
_LOGGER.debug('End launching ParaView in subprocess.')
helper.log_exit(_LOGGER, signature, None)
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
def prepare_paraview_script(script_path, files):
'''
Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
@param files: a list of data files path
@raise Exception: to its caller if any error occurs
'''
    # Since this is an inner function, no entrance/exit information will be logged.
with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin:
with open(script_path, 'w') as fout:
for line in fin.readlines():
fout.write(line.rstrip().replace('<parameter_files>', str(files)) + '\n')
# From http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
class Silence:
"""
Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Example usage
with Silence(stderr='output.txt', mode='a'):
... # appending to existing file
... print >> sys.stderr, "Hello from stderr"
... print "Stdout redirected to os.devnull"
=== contents of 'output.txt' ===
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
"""
Initialize
Args:
self: The class itself
                stdout: the descriptor or file name where to redirect stdout
                stderr: the descriptor or file name where to redirect stderr
                mode: the mode used to open the output file(s)
"""
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
def __enter__(self):
"""
Enter the context
Args:
self: The class itself
"""
import sys
self.sys = sys
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
# flush any pending output
for s in saved_streams: s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = map(os.fdopen, fds, ['w']*2, [0]*2)
else: null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
map(os.dup2, null_fds, fds)
def __exit__(self, *args):
"""
Exit the context
Args:
self: The class itself
args: other arguments
"""
sys = self.sys
# flush any pending output
for s in self.saved_streams: s.flush()
# restore original streams and file descriptors
map(os.dup2, self.saved_fds, self.fds)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams: s.close()
for fd in self.saved_fds: os.close(fd)
return False
def wrapper_io(func, fd, args, return_dict):
"""
Run a function while redirecting its output to a file descriptor
Args:
func: A python function to run
fd: a file descriptor
args: A tuple containing argument for the function
return_dict: Dictionary where to put the result of the function
"""
return_dict["output"] = ''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if fd:
with Silence(stdout=fd, stderr=os.devnull, mode='a'):
return_dict["output"] = func(*args)
else:
return_dict["output"] = func(*args)
def run_thread(func, args, fd):
"""
Run a python function in a thread and wait for it to complete.
Redirect its output to fd
Args:
func: A python function to run
args: A tuple containing argument for the function
fd: a file descriptor
"""
manager = Manager()
return_dict = manager.dict()
p = Process(target=wrapper_io, args=(func, fd, args, return_dict))
p.start()
p.join()
return return_dict["output"]
def writeline_if_not_none(fout, data):
'''
Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line
'''
    # Since this is an inner function, no entrance/exit information will be logged.
if data is not None:
fout.write(str(data) + '\n')
def prepare_dir(prefix):
'''
    Prepare a directory. The directory will be a sub-directory of USER_DATA_DIRECTORY, named with the given
    prefix followed by the current timestamp and a uuid suffix.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if any error occurred when preparing the directory
'''
signature = __name__ + '.prepare_dir()'
helper.log_entrance(_LOGGER, signature, {'prefix': prefix})
# Checking parameters
helper.check_not_none_nor_empty(prefix, 'prefix')
try:
        # Create a directory for this run (sub-directory name in format <prefix>YYYYMMDDhhmmss_<uuid hex>)
        # The uuid suffix allows concurrent requests within the same second.
run_dir = os.path.join(USER_DATA_DIRECTORY, prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex)
os.makedirs(run_dir)
helper.log_exit(_LOGGER, signature, [run_dir])
return run_dir
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
|
def generate_mesh(meshing_dir, params):
'''
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
    @raise ServiceError: if an error occurs while generating the mesh
'''
signature = __name__ + '.generate_mesh()'
helper.log_entrance(_LOGGER, signature,
{'meshing_dir': meshing_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
helper.check_is_directory(meshing_dir, 'meshing_dir')
helper.check_type_value(params, 'params', MeshingParameters, False)
helper.check_not_none_nor_empty(params.infile, 'params.infile')
helper.check_is_file(params.infile, 'params.infile')
helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
helper.check_not_none_nor_empty(params.minh, 'params.minh')
helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
helper.check_not_none_nor_empty(params.grading, 'params.grading')
helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
if params.usetolerance == '1':
helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
try:
config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
# Generate config.txt according to given parameters
with open(config_file_path, 'w') as f:
f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
# Launch mesh generator
with open(log_file_path, 'w') as log_file:
_LOGGER.debug('Start mesh generator in subprocess.')
subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
_LOGGER.debug('End mesh generator in subprocess.')
# Read and return the log file content
with open(log_file_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
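A minimal usage sketch of generate_mesh() together with prepare_dir() from the same module; the input file path and numeric values below are placeholders, and the parameters are passed as strings because the checks above only verify that they are non-empty.
meshing_dir = prepare_dir('meshing_')
params = MeshingParameters(
    infile='/path/to/input_geometry.stl',  # placeholder; must point to an existing file
    outfile='generated_mesh',
    maxh='1.0',
    minh='0.1',
    fineness='0.5',
    grading='0.3',
    usetolerance='0',
    tolerance=None)                        # only required when usetolerance == '1'
mesh_log = generate_mesh(meshing_dir, params)
print(mesh_log)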
| 81 | 132 |
# -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
"""
__author__ = "caoweiquan322, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2015 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process, Manager
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
import fnmatch
import h5py
# This class represents parameters used in the meshing process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
'infile outfile maxh minh fineness grading usetolerance tolerance')
# This class represents parameters used in the simulation process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
'min_wave_frequencies max_wave_frequencies wave_directions ' +
'max_wave_direction min_wave_directions floating_bodies ' +
'indiq_solver ires tol_gmres max_iterations save_potential ' +
'green_tabulation_numx green_tabulation_numz ' +
'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
'use_higher_order num_panel_higher_order b_spline_order ' +
'use_dipoles_implementation thin_panels compute_drift_forces ' +
'compute_yaw_moment remove_irregular_frequencies')
# This class represents a floating body used in the SimulationParameters.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
'force_in_z_direction moment_cdg_force_in_x_direction ' +
'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
'additional_info_lines')
# This class represents parameters used in the post-processing.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
'kochin_function free_surface_elevation')
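# --- Illustrative sketch (not part of the original module) -----------------
# A minimal example of how these namedtuple parameter classes are used; the
# field values below are hypothetical strings of the kind the GUI layer
# would supply.
def _namedtuple_usage_sketch():
    params = MeshingParameters(infile='hull.stl', outfile='hull.dat',
                               maxh='0.5', minh='0.05', fineness='0.5',
                               grading='0.3', usetolerance='0', tolerance=None)
    finer = params._replace(maxh='0.25')   # namedtuples are immutable; _replace returns a copy
    return finer._asdict()                 # fields exposed as an ordered mapping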
# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'
# The pre-defined stdout log file name.
_LOG_FILE_NAME = 'log.txt'
# The logger object for logging.
_LOGGER = logging.getLogger(__name__)
class ServiceError(Exception):
'''
This exception indicates a service error.
It will be raised by methods of this module.
'''
pass
def generate_mesh(meshing_dir, params):
'''
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
@raise ServiceError: if error occurs during generating mesh
'''
signature = __name__ + '.generate_mesh()'
helper.log_entrance(_LOGGER, signature,
{'meshing_dir': meshing_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
helper.check_is_directory(meshing_dir, 'meshing_dir')
helper.check_type_value(params, 'params', MeshingParameters, False)
helper.check_not_none_nor_empty(params.infile, 'params.infile')
helper.check_is_file(params.infile, 'params.infile')
helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
helper.check_not_none_nor_empty(params.minh, 'params.minh')
helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
helper.check_not_none_nor_empty(params.grading, 'params.grading')
helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
if params.usetolerance == '1':
helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
try:
config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
# Generate config.txt according to given parameters
with open(config_file_path, 'w') as f:
f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
# Launch mesh generator
with open(log_file_path, 'w') as log_file:
_LOGGER.debug('Start mesh generator in subprocess.')
subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
_LOGGER.debug('End mesh generator in subprocess.')
# Read and return the log file content
with open(log_file_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
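# --- Illustrative usage sketch (not part of the original module) -----------
# How a caller might invoke generate_mesh(); the directory would typically
# come from prepare_dir() further below, and the file names here are
# hypothetical placeholders.
def _generate_mesh_usage_sketch(meshing_dir):
    params = MeshingParameters(infile='/tmp/hull.stl', outfile='hull.dat',
                               maxh='0.5', minh='0.05', fineness='0.5',
                               grading='0.3', usetolerance='0', tolerance=None)
    # Returns the content of the mesh generator's log file (log.txt).
    return generate_mesh(meshing_dir, params)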
def simulate(simulation_dir, params):
'''
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
'''
signature = __name__ + '.simulate()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', SimulationParameters, False)
helper.check_not_none_nor_empty(params.rho, 'params.rho')
helper.check_not_none_nor_empty(params.g, 'params.g')
helper.check_not_none_nor_empty(params.depth, 'params.depth')
helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
helper.check_not_none_nor_empty(params.ires, 'params.ires')
helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
if params.floating_bodies is not None:
for body in params.floating_bodies:
helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
helper.check_not_none_nor_empty(body.points, 'body.points')
helper.check_not_none_nor_empty(body.panels, 'body.panels')
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
try:
# Write the hdf5 inputs according to given parameters
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_calculations(params, hdf5_data)
# Launch preProcessor and Solver
# A prepared 'results' folder is necessary for the Nemoh software suite
os.mkdir(os.path.join(simulation_dir, 'results'))
simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
'B_SPLINE_ORDER': int(params.b_spline_order),
'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
}
_LOGGER.debug('Start preProcessor function.')
run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path)
_LOGGER.debug('End preProcessor function.')
_LOGGER.debug('Start solver function.')
output = run_thread(solver.solve, (custom_config,), None)
with open(simulation_log_path, 'a') as log_file:
log_file.write(output)
_LOGGER.debug('End solver function.')
with open(simulation_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
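# --- Illustrative usage sketch (not part of the original module) -----------
# SimulationParameters has 29 fields; one compact way to build a trial
# instance is to create a blank tuple and fill every field via _replace.
# All values below are hypothetical, and simulation_dir must already
# contain db.hdf5.
def _simulate_usage_sketch(simulation_dir):
    blank = SimulationParameters(*([''] * len(SimulationParameters._fields)))
    params = blank._replace(
        rho='1025.0', g='9.81', depth='0.0', xeff='0.0', yeff='0.0',
        wave_frequencies='1', min_wave_frequencies='0.1', max_wave_frequencies='2.0',
        wave_directions='1', min_wave_directions='0.0', max_wave_direction='0.0',
        floating_bodies=None, indiq_solver='0', ires='1', tol_gmres='1e-07',
        max_iterations='100', save_potential='1',
        green_tabulation_numx='328', green_tabulation_numz='46',
        green_tabulation_simpson_npoints='251',
        use_ode_influence_coefficients='0', use_higher_order='0',
        num_panel_higher_order='1', b_spline_order='1',
        use_dipoles_implementation='0', thin_panels='-1',
        compute_drift_forces='0', compute_yaw_moment='0',
        remove_irregular_frequencies='0')
    # Returns the combined preProcessor/solver log content.
    return simulate(simulation_dir, params)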
def postprocess(simulation_dir, params):
'''
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
of valid value
@raise ServiceError: if error occurs during launching the post-processing
'''
signature = __name__ + '.postprocess()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', PostprocessingParameters, False)
helper.check_type_value(params.irf, 'params.irf', list, False)
for irf_item in params.irf:
helper.check_not_none_nor_empty(irf_item, 'irf_item')
helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
for kochin_function_item in params.kochin_function:
helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
for elevation_item in params.free_surface_elevation:
helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
try:
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_postprocessing_section(params, hdf5_data)
# Launch postProcessor
postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : 328,
'GREEN_TABULATION_NUMZ' : 46,
'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
'USE_ODE_INFLUENCE_COEFFICIENTS': False,
'USE_HIGHER_ORDER' : False,
'NUM_PANEL_HIGHER_ORDER' : 1,
'B_SPLINE_ORDER': 1,
'USE_DIPOLES_IMPLEMENTATION': False,
'THIN_PANELS': [-1],
'COMPUTE_DRIFT_FORCES' : False,
'COMPUTE_YAW_MOMENT': False,
'REMOVE_IRREGULAR_FREQUENCIES' : False
}
_LOGGER.debug('Start postProcessor function.')
run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path)
_LOGGER.debug('End postProcessor in subprocess.')
with open(postprocessing_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
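# --- Illustrative usage sketch (not part of the original module) -----------
# The post-processing options are passed as lists of strings mirroring the
# Nemoh input layout; the concrete values (and the meaning noted in the
# inline comments) are assumptions for illustration only.
def _postprocess_usage_sketch(simulation_dir):
    params = PostprocessingParameters(
        irf=['1', '0.1', '10'],                 # assumed: switch, time step, duration
        show_pressure='0',
        kochin_function=['0', '0.0', '180.0'],  # assumed: number of directions, min, max
        free_surface_elevation=['0', '0', '100.0', '100.0'])
    return postprocess(simulation_dir, params)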
def visualize(simulation_dir):
'''
Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if error occurs during launching the ParaView
'''
signature = __name__ + '.visualize()'
helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
try:
# Filter files to be opened in ParaView
files = []
for f in os.listdir(os.path.join(simulation_dir, 'results')):
for ext in VISUALIZATION_FILE_EXTENSIONS:
if fnmatch.fnmatch(f, '*.' + ext):
files.append(os.path.join(simulation_dir, 'results', f))
# Check if there's tec/vtk/stl file to visualize
if len(files) == 0:
raise ServiceError('There is no accepted file to visualize.')
_LOGGER.debug('List of files to load:')
_LOGGER.debug(str(files))
# Prepare script to run by ParaView
paraview_script = os.path.join(os.path.join(simulation_dir, 'results'), 'load_data.py')
prepare_paraview_script(paraview_script, files)
# Launch ParaView without waiting for the ParaView to exit
_LOGGER.debug('Start launching ParaView in subprocess.')
        subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script])

_LOGGER.debug('End launching ParaView in subprocess.')
helper.log_exit(_LOGGER, signature, None)
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
def prepare_paraview_script(script_path, files):
'''
Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
    @param files: a list of data file paths
@raise Exception: to its caller if any error occurs
'''
    # Since this is an inner function, no entrance/exit information is logged.
with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin:
with open(script_path, 'w') as fout:
for line in fin.readlines():
fout.write(line.rstrip().replace('<parameter_files>', str(files)) + '\n')
# From http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
class Silence:
"""
Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Example usage
with Silence(stderr='output.txt', mode='a'):
... # appending to existing file
... print >> sys.stderr, "Hello from stderr"
... print "Stdout redirected to os.devnull"
=== contents of 'output.txt' ===
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
"""
Initialize
Args:
self: The class itself
stdout: the descriptor or file name where to redirect stdout
            stderr: the descriptor or file name where to redirect stderr
            mode: the file mode used when opening the redirect target(s)
"""
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
def __enter__(self):
"""
Enter the context
Args:
self: The class itself
"""
import sys
self.sys = sys
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
# flush any pending output
for s in saved_streams: s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = map(os.fdopen, fds, ['w']*2, [0]*2)
else: null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
map(os.dup2, null_fds, fds)
def __exit__(self, *args):
"""
Exit the context
Args:
self: The class itself
args: other arguments
"""
sys = self.sys
# flush any pending output
for s in self.saved_streams: s.flush()
# restore original streams and file descriptors
map(os.dup2, self.saved_fds, self.fds)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams: s.close()
for fd in self.saved_fds: os.close(fd)
return False
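# --- Illustrative usage sketch (not part of the original module) -----------
# Silence swaps the process-level stdout/stderr file descriptors, so it also
# captures output written below the Python layer (e.g. by compiled solver
# code), unlike reassigning sys.stdout alone.  'run.log' is a hypothetical
# file name.
def _silence_usage_sketch():
    with Silence(stdout='run.log', stderr=os.devnull, mode='a'):
        print 'this line ends up in run.log, not on the console'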
def wrapper_io(func, fd, args, return_dict):
"""
Run a function while redirecting its output to a file descriptor
Args:
func: A python function to run
fd: a file descriptor
args: A tuple containing argument for the function
return_dict: Dictionary where to put the result of the function
"""
return_dict["output"] = ''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if fd:
with Silence(stdout=fd, stderr=os.devnull, mode='a'):
return_dict["output"] = func(*args)
else:
return_dict["output"] = func(*args)
def run_thread(func, args, fd):
"""
    Run a python function in a separate process and wait for it to complete.
Redirect its output to fd
Args:
func: A python function to run
args: A tuple containing argument for the function
fd: a file descriptor
"""
manager = Manager()
return_dict = manager.dict()
p = Process(target=wrapper_io, args=(func, fd, args, return_dict))
p.start()
p.join()
return return_dict["output"]
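# --- Illustrative usage sketch (not part of the original module) -----------
# Despite its name, run_thread() uses multiprocessing: the callable runs in
# a child process, anything it prints is redirected to the given file, and
# its return value comes back through a Manager dict.  'worker.log' is a
# hypothetical path; any picklable callable works.
def _run_thread_usage_sketch():
    return run_thread(divmod, (7, 3), 'worker.log')   # -> (2, 1)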
def writeline_if_not_none(fout, data):
'''
Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line
'''
    # Since this is an inner function, no entrance/exit information is logged.
if data is not None:
fout.write(str(data) + '\n')
def prepare_dir(prefix):
'''
    Prepare a run directory. The directory is created as a sub-directory of USER_DATA_DIRECTORY, named with the
    given prefix followed by the current timestamp and a uuid suffix.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if any error occurred when preparing the directory
'''
signature = __name__ + '.prepare_dir()'
helper.log_entrance(_LOGGER, signature, {'prefix': prefix})
# Checking parameters
helper.check_not_none_nor_empty(prefix, 'prefix')
try:
        # Create a directory for this run (sub-directory name in format <prefix>YYYYMMDDhhmmss_<uuid hex>)
        # The uuid suffix keeps concurrent requests within the same second from colliding.
run_dir = os.path.join(USER_DATA_DIRECTORY, prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex)
os.makedirs(run_dir)
helper.log_exit(_LOGGER, signature, [run_dir])
return run_dir
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
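# --- Illustrative usage sketch (not part of the original module) -----------
# prepare_dir() creates a unique run directory under USER_DATA_DIRECTORY by
# joining the prefix, a timestamp and a uuid, so successive runs never
# collide.
def _prepare_dir_usage_sketch():
    meshing_dir = prepare_dir('meshing_')        # e.g. .../meshing_20150101120000_<hex>
    simulation_dir = prepare_dir('simulation_')
    return meshing_dir, simulation_dir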
|
simulate
|
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
|
# -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
"""
__author__ = "caoweiquan322, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2015 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process, Manager
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
import fnmatch
import h5py
# This class represents parameters used in the meshing process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
'infile outfile maxh minh fineness grading usetolerance tolerance')
# This class represents parameters used in the simulation process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
'min_wave_frequencies max_wave_frequencies wave_directions ' +
'max_wave_direction min_wave_directions floating_bodies ' +
'indiq_solver ires tol_gmres max_iterations save_potential ' +
'green_tabulation_numx green_tabulation_numz ' +
'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
'use_higher_order num_panel_higher_order b_spline_order ' +
'use_dipoles_implementation thin_panels compute_drift_forces ' +
'compute_yaw_moment remove_irregular_frequencies')
# This class represents a floating body used in the SimulationParameters.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
'force_in_z_direction moment_cdg_force_in_x_direction ' +
'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
'additional_info_lines')
# This class represents parameters used in the post-processing.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
'kochin_function free_surface_elevation')
# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'
# The pre-defined stdout log file name.
_LOG_FILE_NAME = 'log.txt'
# The logger object for logging.
_LOGGER = logging.getLogger(__name__)
class ServiceError(Exception):
'''
This exception indicates a service error.
It will be raised by methods of this module.
'''
pass
def generate_mesh(meshing_dir, params):
'''
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
@raise ServiceError: if error occurs during generating mesh
'''
signature = __name__ + '.generate_mesh()'
helper.log_entrance(_LOGGER, signature,
{'meshing_dir': meshing_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
helper.check_is_directory(meshing_dir, 'meshing_dir')
helper.check_type_value(params, 'params', MeshingParameters, False)
helper.check_not_none_nor_empty(params.infile, 'params.infile')
helper.check_is_file(params.infile, 'params.infile')
helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
helper.check_not_none_nor_empty(params.minh, 'params.minh')
helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
helper.check_not_none_nor_empty(params.grading, 'params.grading')
helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
if params.usetolerance == '1':
helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
try:
config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
# Generate config.txt according to given parameters
with open(config_file_path, 'w') as f:
f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
# Launch mesh generator
with open(log_file_path, 'w') as log_file:
_LOGGER.debug('Start mesh generator in subprocess.')
subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
_LOGGER.debug('End mesh generator in subprocess.')
# Read and return the log file content
with open(log_file_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
# MASKED: simulate function (lines 134-242)
def postprocess(simulation_dir, params):
'''
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
of valid value
@raise ServiceError: if error occurs during launching the post-processing
'''
signature = __name__ + '.postprocess()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', PostprocessingParameters, False)
helper.check_type_value(params.irf, 'params.irf', list, False)
for irf_item in params.irf:
helper.check_not_none_nor_empty(irf_item, 'irf_item')
helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
for kochin_function_item in params.kochin_function:
helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
for elevation_item in params.free_surface_elevation:
helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
try:
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_postprocessing_section(params, hdf5_data)
# Launch postProcessor
postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : 328,
'GREEN_TABULATION_NUMZ' : 46,
'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
'USE_ODE_INFLUENCE_COEFFICIENTS': False,
'USE_HIGHER_ORDER' : False,
'NUM_PANEL_HIGHER_ORDER' : 1,
'B_SPLINE_ORDER': 1,
'USE_DIPOLES_IMPLEMENTATION': False,
'THIN_PANELS': [-1],
'COMPUTE_DRIFT_FORCES' : False,
'COMPUTE_YAW_MOMENT': False,
'REMOVE_IRREGULAR_FREQUENCIES' : False
}
_LOGGER.debug('Start postProcessor function.')
run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path)
_LOGGER.debug('End postProcessor in subprocess.')
with open(postprocessing_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
def visualize(simulation_dir):
'''
Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if error occurs during launching the ParaView
'''
signature = __name__ + '.visualize()'
helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
try:
# Filter files to be opened in ParaView
files = []
for f in os.listdir(os.path.join(simulation_dir, 'results')):
for ext in VISUALIZATION_FILE_EXTENSIONS:
if fnmatch.fnmatch(f, '*.' + ext):
files.append(os.path.join(simulation_dir, 'results', f))
# Check if there's tec/vtk/stl file to visualize
if len(files) == 0:
raise ServiceError('There is no accepted file to visualize.')
_LOGGER.debug('List of files to load:')
_LOGGER.debug(str(files))
# Prepare script to run by ParaView
paraview_script = os.path.join(os.path.join(simulation_dir, 'results'), 'load_data.py')
prepare_paraview_script(paraview_script, files)
# Launch ParaView without waiting for the ParaView to exit
_LOGGER.debug('Start launching ParaView in subprocess.')
        subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script])
_LOGGER.debug('End launching ParaView in subprocess.')
helper.log_exit(_LOGGER, signature, None)
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
def prepare_paraview_script(script_path, files):
'''
Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
    @param files: a list of data file paths
@raise Exception: to its caller if any error occurs
'''
    # Since this is an inner function, no entrance/exit information is logged.
with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin:
with open(script_path, 'w') as fout:
for line in fin.readlines():
fout.write(line.rstrip().replace('<parameter_files>', str(files)) + '\n')
# From http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
class Silence:
"""
Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Example usage
with Silence(stderr='output.txt', mode='a'):
... # appending to existing file
... print >> sys.stderr, "Hello from stderr"
... print "Stdout redirected to os.devnull"
=== contents of 'output.txt' ===
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
"""
Initialize
Args:
self: The class itself
stdout: the descriptor or file name where to redirect stdout
            stderr: the descriptor or file name where to redirect stderr
            mode: the file mode used when opening the redirect target(s)
"""
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
def __enter__(self):
"""
Enter the context
Args:
self: The class itself
"""
import sys
self.sys = sys
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
# flush any pending output
for s in saved_streams: s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = map(os.fdopen, fds, ['w']*2, [0]*2)
else: null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
map(os.dup2, null_fds, fds)
def __exit__(self, *args):
"""
Exit the context
Args:
self: The class itself
args: other arguments
"""
sys = self.sys
# flush any pending output
for s in self.saved_streams: s.flush()
# restore original streams and file descriptors
map(os.dup2, self.saved_fds, self.fds)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams: s.close()
for fd in self.saved_fds: os.close(fd)
return False
def wrapper_io(func, fd, args, return_dict):
"""
Run a function while redirecting its output to a file descriptor
Args:
func: A python function to run
fd: a file descriptor
args: A tuple containing argument for the function
return_dict: Dictionary where to put the result of the function
"""
return_dict["output"] = ''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if fd:
with Silence(stdout=fd, stderr=os.devnull, mode='a'):
return_dict["output"] = func(*args)
else:
return_dict["output"] = func(*args)
def run_thread(func, args, fd):
"""
    Run a python function in a separate process and wait for it to complete.
Redirect its output to fd
Args:
func: A python function to run
args: A tuple containing argument for the function
fd: a file descriptor
"""
manager = Manager()
return_dict = manager.dict()
p = Process(target=wrapper_io, args=(func, fd, args, return_dict))
p.start()
p.join()
return return_dict["output"]
def writeline_if_not_none(fout, data):
'''
Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line
'''
    # Since this is an inner function, no entrance/exit information is logged.
if data is not None:
fout.write(str(data) + '\n')
def prepare_dir(prefix):
'''
    Prepare a run directory. The directory is created as a sub-directory of USER_DATA_DIRECTORY, named with the
    given prefix followed by the current timestamp and a uuid suffix.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if any error occurred when preparing the directory
'''
signature = __name__ + '.prepare_dir()'
helper.log_entrance(_LOGGER, signature, {'prefix': prefix})
# Checking parameters
helper.check_not_none_nor_empty(prefix, 'prefix')
try:
        # Create a directory for this run (sub-directory name in format <prefix>YYYYMMDDhhmmss_<uuid hex>)
        # The uuid suffix keeps concurrent requests within the same second from colliding.
run_dir = os.path.join(USER_DATA_DIRECTORY, prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex)
os.makedirs(run_dir)
helper.log_exit(_LOGGER, signature, [run_dir])
return run_dir
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
|
def simulate(simulation_dir, params):
'''
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
'''
signature = __name__ + '.simulate()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', SimulationParameters, False)
helper.check_not_none_nor_empty(params.rho, 'params.rho')
helper.check_not_none_nor_empty(params.g, 'params.g')
helper.check_not_none_nor_empty(params.depth, 'params.depth')
helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
helper.check_not_none_nor_empty(params.ires, 'params.ires')
helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
if params.floating_bodies is not None:
for body in params.floating_bodies:
helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
helper.check_not_none_nor_empty(body.points, 'body.points')
helper.check_not_none_nor_empty(body.panels, 'body.panels')
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
try:
# Write the hdf5 inputs according to given parameters
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_calculations(params, hdf5_data)
# Launch preProcessor and Solver
# A prepared 'results' folder is necessary for the Nemoh software suite
os.mkdir(os.path.join(simulation_dir, 'results'))
simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
'B_SPLINE_ORDER': int(params.b_spline_order),
'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
}
_LOGGER.debug('Start preProcessor function.')
run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path)
_LOGGER.debug('End preProcessor function.')
_LOGGER.debug('Start solver function.')
output = run_thread(solver.solve, (custom_config,), None)
with open(simulation_log_path, 'a') as log_file:
log_file.write(output)
_LOGGER.debug('End solver function.')
with open(simulation_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
| 134 | 242 |
# -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
"""
__author__ = "caoweiquan322, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2015 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process, Manager
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
import fnmatch
import h5py
# This class represents parameters used in the meshing process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
'infile outfile maxh minh fineness grading usetolerance tolerance')
# This class represents parameters used in the simulation process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
'min_wave_frequencies max_wave_frequencies wave_directions ' +
'max_wave_direction min_wave_directions floating_bodies ' +
'indiq_solver ires tol_gmres max_iterations save_potential ' +
'green_tabulation_numx green_tabulation_numz ' +
'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
'use_higher_order num_panel_higher_order b_spline_order ' +
'use_dipoles_implementation thin_panels compute_drift_forces ' +
'compute_yaw_moment remove_irregular_frequencies')
# This class represents a floating body used in the SimulationParameters.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
'force_in_z_direction moment_cdg_force_in_x_direction ' +
'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
'additional_info_lines')
# This class represents parameters used in the post-processing.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
'kochin_function free_surface_elevation')
# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'
# The pre-defined stdout log file name.
_LOG_FILE_NAME = 'log.txt'
# The logger object for logging.
_LOGGER = logging.getLogger(__name__)
class ServiceError(Exception):
'''
This exception indicates a service error.
It will be raised by methods of this module.
'''
pass
def generate_mesh(meshing_dir, params):
'''
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
@raise ServiceError: if error occurs during generating mesh
'''
signature = __name__ + '.generate_mesh()'
helper.log_entrance(_LOGGER, signature,
{'meshing_dir': meshing_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
helper.check_is_directory(meshing_dir, 'meshing_dir')
helper.check_type_value(params, 'params', MeshingParameters, False)
helper.check_not_none_nor_empty(params.infile, 'params.infile')
helper.check_is_file(params.infile, 'params.infile')
helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
helper.check_not_none_nor_empty(params.minh, 'params.minh')
helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
helper.check_not_none_nor_empty(params.grading, 'params.grading')
helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
if params.usetolerance == '1':
helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
try:
config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
# Generate config.txt according to given parameters
with open(config_file_path, 'w') as f:
f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
# Launch mesh generator
with open(log_file_path, 'w') as log_file:
_LOGGER.debug('Start mesh generator in subprocess.')
subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
_LOGGER.debug('End mesh generator in subprocess.')
# Read and return the log file content
with open(log_file_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
def simulate(simulation_dir, params):
'''
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
'''
signature = __name__ + '.simulate()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', SimulationParameters, False)
helper.check_not_none_nor_empty(params.rho, 'params.rho')
helper.check_not_none_nor_empty(params.g, 'params.g')
helper.check_not_none_nor_empty(params.depth, 'params.depth')
helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
helper.check_not_none_nor_empty(params.ires, 'params.ires')
helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
if params.floating_bodies is not None:
for body in params.floating_bodies:
helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
helper.check_not_none_nor_empty(body.points, 'body.points')
helper.check_not_none_nor_empty(body.panels, 'body.panels')
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
try:
# Write the hdf5 inputs according to given parameters
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_calculations(params, hdf5_data)
# Launch preProcessor and Solver
# A prepared 'results' folder is necessary for the Nemoh software suite
os.mkdir(os.path.join(simulation_dir, 'results'))
simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
'B_SPLINE_ORDER': int(params.b_spline_order),
'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
}
_LOGGER.debug('Start preProcessor function.')
run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path)
_LOGGER.debug('End preProcessor function.')
_LOGGER.debug('Start solver function.')
output = run_thread(solver.solve, (custom_config,), None)
with open(simulation_log_path, 'a') as log_file:
log_file.write(output)
_LOGGER.debug('End solver function.')
with open(simulation_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
def postprocess(simulation_dir, params):
'''
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
of valid value
@raise ServiceError: if error occurs during launching the post-processing
'''
signature = __name__ + '.postprocess()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', PostprocessingParameters, False)
helper.check_type_value(params.irf, 'params.irf', list, False)
for irf_item in params.irf:
helper.check_not_none_nor_empty(irf_item, 'irf_item')
helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
for kochin_function_item in params.kochin_function:
helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
for elevation_item in params.free_surface_elevation:
helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
try:
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_postprocessing_section(params, hdf5_data)
# Launch postProcessor
postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : 328,
'GREEN_TABULATION_NUMZ' : 46,
'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
'USE_ODE_INFLUENCE_COEFFICIENTS': False,
'USE_HIGHER_ORDER' : False,
'NUM_PANEL_HIGHER_ORDER' : 1,
'B_SPLINE_ORDER': 1,
'USE_DIPOLES_IMPLEMENTATION': False,
'THIN_PANELS': [-1],
'COMPUTE_DRIFT_FORCES' : False,
'COMPUTE_YAW_MOMENT': False,
'REMOVE_IRREGULAR_FREQUENCIES' : False
}
_LOGGER.debug('Start postProcessor function.')
run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path)
_LOGGER.debug('End postProcessor function.')
with open(postprocessing_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
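# A hedged usage sketch for postprocess(): the directory and every parameter
# value below are illustrative placeholders, and a real call needs a simulation
# directory that already contains the 'db.hdf5' written by simulate().
def _example_postprocess(sim_dir):
    pp_params = PostprocessingParameters(
        irf=['1', '0.1', '10.0'],               # placeholder switch, time step, duration
        show_pressure='0',
        kochin_function=['0', '0.0', '180.0'],  # placeholder direction count, min, max
        free_surface_elevation=['0', '0', '100.0', '100.0'])
    return postprocess(sim_dir, pp_params)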
def visualize(simulation_dir):
'''
Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if error occurs during launching the ParaView
'''
signature = __name__ + '.visualize()'
helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
try:
# Filter files to be opened in ParaView
files = []
for f in os.listdir(os.path.join(simulation_dir, 'results')):
for ext in VISUALIZATION_FILE_EXTENSIONS:
if fnmatch.fnmatch(f, '*.' + ext):
files.append(os.path.join(simulation_dir, 'results', f))
# Check if there's tec/vtk/stl file to visualize
if len(files) == 0:
raise ServiceError('There is no accepted file to visualize.')
_LOGGER.debug('List of files to load:')
_LOGGER.debug(str(files))
# Prepare script to run by ParaView
paraview_script = os.path.join(simulation_dir, 'results', 'load_data.py')
prepare_paraview_script(paraview_script, files)
# Launch ParaView without waiting for the ParaView to exit
_LOGGER.debug('Start launching ParaView in subprocess.')
subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script])
_LOGGER.debug('End launching ParaView in subprocess.')
helper.log_exit(_LOGGER, signature, None)
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
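# A hedged sketch of the extension filtering used by visualize() above; the
# extension list here is illustrative, the real list comes from
# VISUALIZATION_FILE_EXTENSIONS in settings.
def _example_extension_filter(file_names):
    extensions = ['tec', 'vtk', 'stl']  # placeholder values
    return [name for name in file_names
            if any(fnmatch.fnmatch(name, '*.' + ext) for ext in extensions)]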
def prepare_paraview_script(script_path, files):
'''
Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
@param files: a list of data files path
@raise Exception: to its caller if any error occurs
'''
# Since this is an inner function, no entrance/exit information would be logged.
with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin:
with open(script_path, 'w') as fout:
for line in fin.readlines():
fout.write(line.rstrip().replace('<parameter_files>', str(files)) + '\n')
# From http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
class Silence:
"""
Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Example usage
with Silence(stderr='output.txt', mode='a'):
... # appending to existing file
... print >> sys.stderr, "Hello from stderr"
... print "Stdout redirected to os.devnull"
=== contents of 'output.txt' ===
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
"""
Initialize
Args:
self: The class itself
stdout: the descriptor or file name where to redirect stdout
stderr: the descriptor or file name where to redirect stderr
mode: the mode used to open the redirection file(s)
"""
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
def __enter__(self):
"""
Enter the context
Args:
self: The class itself
"""
import sys
self.sys = sys
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
# flush any pending output
for s in saved_streams: s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = map(os.fdopen, fds, ['w']*2, [0]*2)
else: null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
map(os.dup2, null_fds, fds)
def __exit__(self, *args):
"""
Exit the context
Args:
self: The class itself
args: other arguments
"""
sys = self.sys
# flush any pending output
for s in self.saved_streams: s.flush()
# restore original streams and file descriptors
map(os.dup2, self.saved_fds, self.fds)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams: s.close()
for fd in self.saved_fds: os.close(fd)
return False
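# A hedged sketch of using the Silence context manager on its own: both streams
# are routed to the same log file (log_path is a placeholder), mirroring how
# wrapper_io below uses it with stdout=fd.
def _example_silence(log_path):
    import sys
    with Silence(stdout=log_path, stderr=log_path, mode='a'):
        sys.stdout.write('captured stdout\n')
        sys.stderr.write('captured stderr\n')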
def wrapper_io(func, fd, args, return_dict):
"""
Run a function while redirecting its output to a file (when fd is given)
Args:
func: A python function to run
fd: a file path to redirect stdout/stderr to, or None for no redirection
args: A tuple containing arguments for the function
return_dict: Dictionary where to put the result of the function
"""
return_dict["output"] = ''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if fd:
with Silence(stdout=fd, stderr=os.devnull, mode='a'):
return_dict["output"] = func(*args)
else:
return_dict["output"] = func(*args)
def run_thread(func, args, fd):
"""
Run a python function in a separate process and wait for it to complete.
Redirect its output to fd if fd is not None.
Args:
func: A python function to run
args: A tuple containing arguments for the function
fd: a file path to redirect the function's output to, or None for no redirection
"""
manager = Manager()
return_dict = manager.dict()
p = Process(target=wrapper_io, args=(func, fd, args, return_dict))
p.start()
p.join()
return return_dict["output"]
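# A hedged usage sketch for run_thread(). _echo is a throwaway helper defined
# only for this example, and log_path is a placeholder file path that receives
# anything the child process writes to stdout/stderr.
def _echo(message):
    # Trivial callable used as the child-process workload in the sketch below.
    return message
def _example_run_thread(log_path):
    return run_thread(_echo, ('hello from the child process',), log_path)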
def writeline_if_not_none(fout, data):
'''
Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line
'''
# Since this is an inner function, no entrance/exit information would be logged.
if data is not None:
fout.write(str(data) + '\n')
def prepare_dir(prefix):
'''
Prepare a directory. The directory will be a sub-directory of USER_DATA_DIRECTORY, named with the
given prefix followed by the current timestamp and a uuid suffix.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if any error occurred when preparing the directory
'''
signature = __name__ + '.prepare_dir()'
helper.log_entrance(_LOGGER, signature, {'prefix': prefix})
# Checking parameters
helper.check_not_none_nor_empty(prefix, 'prefix')
try:
# Create a directory for this run (sub-directory name: <prefix>YYYYMMDDhhmmss_<uuid hex>)
# The uuid suffix allows multiple concurrent requests within the same second.
run_dir = os.path.join(USER_DATA_DIRECTORY, prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex)
os.makedirs(run_dir)
helper.log_exit(_LOGGER, signature, [run_dir])
return run_dir
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
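# A hedged sketch of how the meshing/simulation workflow typically starts; the
# prefix strings are illustrative, any non-empty prefix is accepted.
def _example_prepare_dirs():
    meshing_dir = prepare_dir('meshing_')
    simulation_dir = prepare_dir('simulation_')
    return meshing_dir, simulation_dir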
|
postprocess
|
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
of valid value
@raise ServiceError: if error occurs during launching the post-processing
|
# -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
"""
__author__ = "caoweiquan322, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2015 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process, Manager
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
import fnmatch
import h5py
# This class represents parameters used in the meshing process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
'infile outfile maxh minh fineness grading usetolerance tolerance')
# This class represents parameters used in the simulation process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
'min_wave_frequencies max_wave_frequencies wave_directions ' +
'max_wave_direction min_wave_directions floating_bodies ' +
'indiq_solver ires tol_gmres max_iterations save_potential ' +
'green_tabulation_numx green_tabulation_numz ' +
'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
'use_higher_order num_panel_higher_order b_spline_order ' +
'use_dipoles_implementation thin_panels compute_drift_forces ' +
'compute_yaw_moment remove_irregular_frequencies')
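# SimulationParameters is an ordinary namedtuple whose values are passed on to
# utility.write_calculations() by simulate(); a hedged sketch of inspecting its
# field names and building an instance from a dict (form_data is assumed to be
# a dict whose keys match those field names, with every value kept as a string):
def _example_simulation_parameter_fields():
    # Tuple of field names in declaration order.
    return SimulationParameters._fields
def _example_simulation_parameters_from_dict(form_data):
    return SimulationParameters(**form_data)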
# This class represents a floating body used in the SimulationParameters.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
'force_in_z_direction moment_cdg_force_in_x_direction ' +
'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
'additional_info_lines')
# This class represents parameters used in the post-processing.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
'kochin_function free_surface_elevation')
# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'
# The pre-defined stdout log file name.
_LOG_FILE_NAME = 'log.txt'
# The logger object for logging.
_LOGGER = logging.getLogger(__name__)
class ServiceError(Exception):
'''
This exception indicates a service error.
It will be raised by methods of this module.
'''
pass
def generate_mesh(meshing_dir, params):
'''
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
@raise ServiceError: if error occurs during generating mesh
'''
signature = __name__ + '.generate_mesh()'
helper.log_entrance(_LOGGER, signature,
{'meshing_dir': meshing_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
helper.check_is_directory(meshing_dir, 'meshing_dir')
helper.check_type_value(params, 'params', MeshingParameters, False)
helper.check_not_none_nor_empty(params.infile, 'params.infile')
helper.check_is_file(params.infile, 'params.infile')
helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
helper.check_not_none_nor_empty(params.minh, 'params.minh')
helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
helper.check_not_none_nor_empty(params.grading, 'params.grading')
helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
if params.usetolerance == '1':
helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
try:
config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
# Generate config.txt according to given parameters
with open(config_file_path, 'w') as f:
f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
# Launch mesh generator
with open(log_file_path, 'w') as log_file:
_LOGGER.debug('Start mesh generator in subprocess.')
subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
_LOGGER.debug('End mesh generator in subprocess.')
# Read and return the log file content
with open(log_file_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
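# A hedged usage sketch for generate_mesh(). The numeric values and the output
# file name are placeholders, not recommended settings; geometry_file must be
# an existing geometry file and meshing_dir an existing directory.
def _example_generate_mesh(meshing_dir, geometry_file):
    mesh_params = MeshingParameters(
        infile=geometry_file, outfile='mesh.dat',
        maxh='1.0', minh='0.05', fineness='0.5', grading='0.3',
        usetolerance='0', tolerance=None)
    return generate_mesh(meshing_dir, mesh_params)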
def simulate(simulation_dir, params):
'''
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
'''
signature = __name__ + '.simulate()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', SimulationParameters, False)
helper.check_not_none_nor_empty(params.rho, 'params.rho')
helper.check_not_none_nor_empty(params.g, 'params.g')
helper.check_not_none_nor_empty(params.depth, 'params.depth')
helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
helper.check_not_none_nor_empty(params.ires, 'params.ires')
helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
if params.floating_bodies is not None:
for body in params.floating_bodies:
helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
helper.check_not_none_nor_empty(body.points, 'body.points')
helper.check_not_none_nor_empty(body.panels, 'body.panels')
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
try:
# Write the hdf5 inputs according to given parameters
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_calculations(params, hdf5_data)
# Launch preProcessor and Solver
# A prepared 'results' folder is necessary for the Nemoh software suite
os.mkdir(os.path.join(simulation_dir, 'results'))
simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
'B_SPLINE_ORDER': int(params.b_spline_order),
'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
}
_LOGGER.debug('Start preProcessor function.')
run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path)
_LOGGER.debug('End preProcessor function.')
_LOGGER.debug('Start solver function.')
output = run_thread(solver.solve, (custom_config,), None)
with open(simulation_log_path, 'a') as log_file:
log_file.write(output)
_LOGGER.debug('End solver function.')
with open(simulation_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
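# custom_config above converts the GUI's string inputs with two small idioms;
# a hedged sketch of the same conversions with placeholder values:
def _example_flag_conversion():
    use_higher_order = bool(int('0'))                # '0'/'1' strings -> booleans
    thin_panels = [int(i) for i in '0 -1'.split()]   # space-separated string -> list of ints
    return use_higher_order, thin_panels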
# MASKED: postprocess function (lines 244-315)
def visualize(simulation_dir):
'''
Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if error occurs during launching the ParaView
'''
signature = __name__ + '.visualize()'
helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
try:
# Filter files to be opened in ParaView
files = []
for f in os.listdir(os.path.join(simulation_dir, 'results')):
for ext in VISUALIZATION_FILE_EXTENSIONS:
if fnmatch.fnmatch(f, '*.' + ext):
files.append(os.path.join(simulation_dir, 'results', f))
# Check if there's tec/vtk/stl file to visualize
if len(files) == 0:
raise ServiceError('There is no accepted file to visualize.')
_LOGGER.debug('List of files to load:')
_LOGGER.debug(str(files))
# Prepare script to run by ParaView
paraview_script = os.path.join(simulation_dir, 'results', 'load_data.py')
prepare_paraview_script(paraview_script, files)
# Launch ParaView without waiting for the ParaView to exit
_LOGGER.debug('Start launching ParaView in subprocess.')
subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script])
_LOGGER.debug('End launching ParaView in subprocess.')
helper.log_exit(_LOGGER, signature, None)
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
def prepare_paraview_script(script_path, files):
'''
Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
@param files: a list of data files path
@raise Exception: to its caller if any error occurs
'''
# Since this is an inner function, no entrance/exit information would be logged.
with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin:
with open(script_path, 'w') as fout:
for line in fin.readlines():
fout.write(line.rstrip().replace('<parameter_files>', str(files)) + '\n')
# From http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
class Silence:
"""
Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Example usage
with Silence(stderr='output.txt', mode='a'):
... # appending to existing file
... print >> sys.stderr, "Hello from stderr"
... print "Stdout redirected to os.devnull"
=== contents of 'output.txt' ===
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
"""
Initialize
Args:
self: The class itself
stdout: the descriptor or file name where to redirect stdout
stderr: the descriptor or file name where to redirect stderr
mode: the mode used to open the redirection file(s)
"""
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
def __enter__(self):
"""
Enter the context
Args:
self: The class itself
"""
import sys
self.sys = sys
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
# flush any pending output
for s in saved_streams: s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = map(os.fdopen, fds, ['w']*2, [0]*2)
else: null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
map(os.dup2, null_fds, fds)
def __exit__(self, *args):
"""
Exit the context
Args:
self: The class itself
args: other arguments
"""
sys = self.sys
# flush any pending output
for s in self.saved_streams: s.flush()
# restore original streams and file descriptors
map(os.dup2, self.saved_fds, self.fds)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams: s.close()
for fd in self.saved_fds: os.close(fd)
return False
def wrapper_io(func, fd, args, return_dict):
"""
Run a function while redirecting its output to a file (when fd is given)
Args:
func: A python function to run
fd: a file path to redirect stdout/stderr to, or None for no redirection
args: A tuple containing arguments for the function
return_dict: Dictionary where to put the result of the function
"""
return_dict["output"] = ''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if fd:
with Silence(stdout=fd, stderr=os.devnull, mode='a'):
return_dict["output"] = func(*args)
else:
return_dict["output"] = func(*args)
def run_thread(func, args, fd):
"""
Run a python function in a separate process and wait for it to complete.
Redirect its output to fd if fd is not None.
Args:
func: A python function to run
args: A tuple containing arguments for the function
fd: a file path to redirect the function's output to, or None for no redirection
"""
manager = Manager()
return_dict = manager.dict()
p = Process(target=wrapper_io, args=(func, fd, args, return_dict))
p.start()
p.join()
return return_dict["output"]
def writeline_if_not_none(fout, data):
'''
Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line
'''
# Since this is an inner function, no entrance/exit information would be logged.
if data is not None:
fout.write(str(data) + '\n')
def prepare_dir(prefix):
'''
Prepare a directory. The directory will be a sub-directory of USER_DATA_DIRECTORY, named with the
given prefix followed by the current timestamp and a uuid suffix.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if any error occurred when preparing the directory
'''
signature = __name__ + '.prepare_dir()'
helper.log_entrance(_LOGGER, signature, {'prefix': prefix})
# Checking parameters
helper.check_not_none_nor_empty(prefix, 'prefix')
try:
# Create a directory for this run (sub-directory name: <prefix>YYYYMMDDhhmmss_<uuid hex>)
# The uuid suffix allows multiple concurrent requests within the same second.
run_dir = os.path.join(USER_DATA_DIRECTORY, prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex)
os.makedirs(run_dir)
helper.log_exit(_LOGGER, signature, [run_dir])
return run_dir
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
|
def postprocess(simulation_dir, params):
'''
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
of valid value
@raise ServiceError: if error occurs during launching the post-processing
'''
signature = __name__ + '.postprocess()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', PostprocessingParameters, False)
helper.check_type_value(params.irf, 'params.irf', list, False)
for irf_item in params.irf:
helper.check_not_none_nor_empty(irf_item, 'irf_item')
helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
for kochin_function_item in params.kochin_function:
helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
for elevation_item in params.free_surface_elevation:
helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
try:
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_postprocessing_section(params, hdf5_data)
# Launch postProcessor
postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : 328,
'GREEN_TABULATION_NUMZ' : 46,
'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
'USE_ODE_INFLUENCE_COEFFICIENTS': False,
'USE_HIGHER_ORDER' : False,
'NUM_PANEL_HIGHER_ORDER' : 1,
'B_SPLINE_ORDER': 1,
'USE_DIPOLES_IMPLEMENTATION': False,
'THIN_PANELS': [-1],
'COMPUTE_DRIFT_FORCES' : False,
'COMPUTE_YAW_MOMENT': False,
'REMOVE_IRREGULAR_FREQUENCIES' : False
}
_LOGGER.debug('Start postProcessor function.')
run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path)
_LOGGER.debug('End postProcessor function.')
with open(postprocessing_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
| 244 | 315 |
# -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
"""
__author__ = "caoweiquan322, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2015 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process, Manager
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
import fnmatch
import h5py
# This class represents parameters used in the meshing process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
'infile outfile maxh minh fineness grading usetolerance tolerance')
# This class represents parameters used in the simulation process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
'min_wave_frequencies max_wave_frequencies wave_directions ' +
'max_wave_direction min_wave_directions floating_bodies ' +
'indiq_solver ires tol_gmres max_iterations save_potential ' +
'green_tabulation_numx green_tabulation_numz ' +
'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
'use_higher_order num_panel_higher_order b_spline_order ' +
'use_dipoles_implementation thin_panels compute_drift_forces ' +
'compute_yaw_moment remove_irregular_frequencies')
# This class represents a floating body used in the SimulationParameters.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
'force_in_z_direction moment_cdg_force_in_x_direction ' +
'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
'additional_info_lines')
# This class represents parameters used in the post-processing.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
'kochin_function free_surface_elevation')
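# These parameter holders are plain namedtuples, so callers can build them by
# keyword, derive variants without mutation, and convert them to dicts; a
# hedged sketch with placeholder values:
def _example_parameter_tuples():
    base = PostprocessingParameters(
        irf=['0', '0.1', '10.0'], show_pressure='0',
        kochin_function=['0', '0.0', '180.0'],
        free_surface_elevation=['0', '0', '0.0', '0.0'])
    with_irf = base._replace(irf=['1', '0.1', '10.0'])
    return with_irf._asdict()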
# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'
# The pre-defined stdout log file name.
_LOG_FILE_NAME = 'log.txt'
# The logger object for logging.
_LOGGER = logging.getLogger(__name__)
class ServiceError(Exception):
'''
This exception indicates a service error.
It will be raised by methods of this module.
'''
pass
def generate_mesh(meshing_dir, params):
'''
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
@raise ServiceError: if error occurs during generating mesh
'''
signature = __name__ + '.generate_mesh()'
helper.log_entrance(_LOGGER, signature,
{'meshing_dir': meshing_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
helper.check_is_directory(meshing_dir, 'meshing_dir')
helper.check_type_value(params, 'params', MeshingParameters, False)
helper.check_not_none_nor_empty(params.infile, 'params.infile')
helper.check_is_file(params.infile, 'params.infile')
helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
helper.check_not_none_nor_empty(params.minh, 'params.minh')
helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
helper.check_not_none_nor_empty(params.grading, 'params.grading')
helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
if params.usetolerance == '1':
helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
try:
config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
# Generate config.txt according to given parameters
with open(config_file_path, 'w') as f:
f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
# Launch mesh generator
with open(log_file_path, 'w') as log_file:
_LOGGER.debug('Start mesh generator in subprocess.')
subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
_LOGGER.debug('End mesh generator in subprocess.')
# Read and return the log file content
with open(log_file_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
def simulate(simulation_dir, params):
'''
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
'''
signature = __name__ + '.simulate()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', SimulationParameters, False)
helper.check_not_none_nor_empty(params.rho, 'params.rho')
helper.check_not_none_nor_empty(params.g, 'params.g')
helper.check_not_none_nor_empty(params.depth, 'params.depth')
helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
helper.check_not_none_nor_empty(params.ires, 'params.ires')
helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
if params.floating_bodies is not None:
for body in params.floating_bodies:
helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
helper.check_not_none_nor_empty(body.points, 'body.points')
helper.check_not_none_nor_empty(body.panels, 'body.panels')
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
try:
# Write the hdf5 inputs according to given parameters
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_calculations(params, hdf5_data)
# Launch preProcessor and Solver
# A prepared 'results' folder is necessary for the Nemoh software suite
os.mkdir(os.path.join(simulation_dir, 'results'))
simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
'B_SPLINE_ORDER': int(params.b_spline_order),
'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
}
_LOGGER.debug('Start preProcessor function.')
run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path)
_LOGGER.debug('End preProcessor function.')
_LOGGER.debug('Start solver function.')
output = run_thread(solver.solve, (custom_config,), None)
with open(simulation_log_path, 'a') as log_file:
log_file.write(output)
_LOGGER.debug('End solver function.')
with open(simulation_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
def postprocess(simulation_dir, params):
'''
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
of valid value
@raise ServiceError: if error occurs during launching the post-processing
'''
signature = __name__ + '.postprocess()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', PostprocessingParameters, False)
helper.check_type_value(params.irf, 'params.irf', list, False)
for irf_item in params.irf:
helper.check_not_none_nor_empty(irf_item, 'irf_item')
helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
for kochin_function_item in params.kochin_function:
helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
for elevation_item in params.free_surface_elevation:
helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
try:
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_postprocessing_section(params, hdf5_data)
# Launch postProcessor
postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : 328,
'GREEN_TABULATION_NUMZ' : 46,
'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
'USE_ODE_INFLUENCE_COEFFICIENTS': False,
'USE_HIGHER_ORDER' : False,
'NUM_PANEL_HIGHER_ORDER' : 1,
'B_SPLINE_ORDER': 1,
'USE_DIPOLES_IMPLEMENTATION': False,
'THIN_PANELS': [-1],
'COMPUTE_DRIFT_FORCES' : False,
'COMPUTE_YAW_MOMENT': False,
'REMOVE_IRREGULAR_FREQUENCIES' : False
}
_LOGGER.debug('Start postProcessor function.')
run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path)
_LOGGER.debug('End postProcessor function.')
with open(postprocessing_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
def visualize(simulation_dir):
'''
Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if error occurs during launching the ParaView
'''
signature = __name__ + '.visualize()'
helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
try:
# Filter files to be opened in ParaView
files = []
for f in os.listdir(os.path.join(simulation_dir, 'results')):
for ext in VISUALIZATION_FILE_EXTENSIONS:
if fnmatch.fnmatch(f, '*.' + ext):
files.append(os.path.join(simulation_dir, 'results', f))
# Check if there's tec/vtk/stl file to visualize
if len(files) == 0:
raise ServiceError('There is no accepted file to visualize.')
_LOGGER.debug('List of files to load:')
_LOGGER.debug(str(files))
# Prepare script to run by ParaView
paraview_script = os.path.join(simulation_dir, 'results', 'load_data.py')
prepare_paraview_script(paraview_script, files)
# Launch ParaView without waiting for the ParaView to exit
_LOGGER.debug('Start launching ParaView in subprocess.')
subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script])
_LOGGER.debug('End launching ParaView in subprocess.')
helper.log_exit(_LOGGER, signature, None)
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
def prepare_paraview_script(script_path, files):
'''
Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
@param files: a list of data files path
@raise Exception: to its caller if any error occurs
'''
# Since this is an inner function, no entrance/exit information would be logged.
with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin:
with open(script_path, 'w') as fout:
for line in fin.readlines():
fout.write(line.rstrip().replace('<parameter_files>', str(files)) + '\n')
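# prepare_paraview_script() only performs a textual substitution on the
# template; a hedged sketch of that transformation (the template line below is
# illustrative, not the real content of PARAVIEW_SCRIPT_TEMPLATE):
def _example_template_substitution():
    files = ['/tmp/results/WaveField.tec']
    template_line = 'data = OpenDataFile(<parameter_files>)\n'
    return template_line.rstrip().replace('<parameter_files>', str(files))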
# From http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
class Silence:
"""
Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Example usage
with Silence(stderr='output.txt', mode='a'):
... # appending to existing file
... print >> sys.stderr, "Hello from stderr"
... print "Stdout redirected to os.devnull"
=== contents of 'output.txt' ===
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
"""
Initialize
Args:
self: The class itself
stdout: the descriptor or file name where to redirect stdout
stderr: the descriptor or file name where to redirect stderr
mode: the mode used to open the redirection file(s)
"""
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
def __enter__(self):
"""
Enter the context
Args:
self: The class itself
"""
import sys
self.sys = sys
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
# flush any pending output
for s in saved_streams: s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = map(os.fdopen, fds, ['w']*2, [0]*2)
else: null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
map(os.dup2, null_fds, fds)
def __exit__(self, *args):
"""
Exit the context
Args:
self: The class itself
args: other arguments
"""
sys = self.sys
# flush any pending output
for s in self.saved_streams: s.flush()
# restore original streams and file descriptors
map(os.dup2, self.saved_fds, self.fds)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams: s.close()
for fd in self.saved_fds: os.close(fd)
return False
def wrapper_io(func, fd, args, return_dict):
"""
Run a function while redirecting its output to a file (when fd is given)
Args:
func: A python function to run
fd: a file path to redirect stdout/stderr to, or None for no redirection
args: A tuple containing arguments for the function
return_dict: Dictionary where to put the result of the function
"""
return_dict["output"] = ''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if fd:
with Silence(stdout=fd, stderr=os.devnull, mode='a'):
return_dict["output"] = func(*args)
else:
return_dict["output"] = func(*args)
def run_thread(func, args, fd):
"""
Run a python function in a separate process and wait for it to complete.
Redirect its output to fd if fd is not None.
Args:
func: A python function to run
args: A tuple containing arguments for the function
fd: a file path to redirect the function's output to, or None for no redirection
"""
manager = Manager()
return_dict = manager.dict()
p = Process(target=wrapper_io, args=(func, fd, args, return_dict))
p.start()
p.join()
return return_dict["output"]
def writeline_if_not_none(fout, data):
'''
Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line
'''
# Since this is an inner function, no entrance/exit information would be logged.
if data is not None:
fout.write(str(data) + '\n')
def prepare_dir(prefix):
'''
Prepare a directory. The directory will be a sub-directory of USER_DATA_DIRECTORY, named with the
given prefix followed by the current timestamp and a uuid suffix.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if any error occurred when preparing the directory
'''
signature = __name__ + '.prepare_dir()'
helper.log_entrance(_LOGGER, signature, {'prefix': prefix})
# Checking parameters
helper.check_not_none_nor_empty(prefix, 'prefix')
try:
# Create a directory for this run (sub-directory name: <prefix>YYYYMMDDhhmmss_<uuid hex>)
# The uuid suffix allows multiple concurrent requests within the same second.
run_dir = os.path.join(USER_DATA_DIRECTORY, prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex)
os.makedirs(run_dir)
helper.log_exit(_LOGGER, signature, [run_dir])
return run_dir
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
|
run_thread
|
Run a python function in a separate process and wait for it to complete.
Redirect its output to fd if fd is not None.
Args:
func: A python function to run
args: A tuple containing arguments for the function
fd: a file path to redirect the function's output to, or None for no redirection
|
# -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
"""
__author__ = "caoweiquan322, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2015 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process, Manager
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
import fnmatch
import h5py
# This class represents parameters used in the meshing process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
'infile outfile maxh minh fineness grading usetolerance tolerance')
# This class represents parameters used in the simulation process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
'min_wave_frequencies max_wave_frequencies wave_directions ' +
'max_wave_direction min_wave_directions floating_bodies ' +
'indiq_solver ires tol_gmres max_iterations save_potential ' +
'green_tabulation_numx green_tabulation_numz ' +
'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
'use_higher_order num_panel_higher_order b_spline_order ' +
'use_dipoles_implementation thin_panels compute_drift_forces ' +
'compute_yaw_moment remove_irregular_frequencies')
# This class represents a floating body used in the SimulationParameters.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
'force_in_z_direction moment_cdg_force_in_x_direction ' +
'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
'additional_info_lines')
# This class represents parameters used in the post-processing.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
'kochin_function free_surface_elevation')
# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'
# The pre-defined stdout log file name.
_LOG_FILE_NAME = 'log.txt'
# The logger object for logging.
_LOGGER = logging.getLogger(__name__)
class ServiceError(Exception):
'''
This exception indicates a service error.
It will be raised by methods of this module.
'''
pass
def generate_mesh(meshing_dir, params):
'''
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
@raise ServiceError: if error occurs during generating mesh
'''
signature = __name__ + '.generate_mesh()'
helper.log_entrance(_LOGGER, signature,
{'meshing_dir': meshing_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
helper.check_is_directory(meshing_dir, 'meshing_dir')
helper.check_type_value(params, 'params', MeshingParameters, False)
helper.check_not_none_nor_empty(params.infile, 'params.infile')
helper.check_is_file(params.infile, 'params.infile')
helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
helper.check_not_none_nor_empty(params.minh, 'params.minh')
helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
helper.check_not_none_nor_empty(params.grading, 'params.grading')
helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
if params.usetolerance == '1':
helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
try:
config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
# Generate config.txt according to given parameters
with open(config_file_path, 'w') as f:
f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
# Launch mesh generator
with open(log_file_path, 'w') as log_file:
_LOGGER.debug('Start mesh generator in subprocess.')
subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
_LOGGER.debug('End mesh generator in subprocess.')
# Read and return the log file content
with open(log_file_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
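# Hedged usage sketch (illustrative only, never called by this module): prepare a
# working directory and run the mesh generator. The input path and the 'meshing_'
# prefix are hypothetical placeholders; prepare_dir() is defined further below.
def _example_generate_mesh():
    meshing_dir = prepare_dir('meshing_')
    params = MeshingParameters(
        infile='/path/to/hull.stl',    # hypothetical, must exist on disk
        outfile='hull_mesh',
        maxh='1.0', minh='0.1', fineness='0.5', grading='0.3',
        usetolerance='0', tolerance=None)
    # Returns the content of log.txt written by MESH_GENERATOR_BIN.
    return generate_mesh(meshing_dir, params)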
def simulate(simulation_dir, params):
'''
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
'''
signature = __name__ + '.simulate()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', SimulationParameters, False)
helper.check_not_none_nor_empty(params.rho, 'params.rho')
helper.check_not_none_nor_empty(params.g, 'params.g')
helper.check_not_none_nor_empty(params.depth, 'params.depth')
helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
helper.check_not_none_nor_empty(params.ires, 'params.ires')
helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
if params.floating_bodies is not None:
for body in params.floating_bodies:
helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
helper.check_not_none_nor_empty(body.points, 'body.points')
helper.check_not_none_nor_empty(body.panels, 'body.panels')
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
try:
# Write the hdf5 inputs according to given parameters
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_calculations(params, hdf5_data)
# Launch preProcessor and Solver
# A prepared 'results' folder is necessary for the Nemoh software suite
os.mkdir(os.path.join(simulation_dir, 'results'))
simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
'B_SPLINE_ORDER': int(params.b_spline_order),
'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
}
_LOGGER.debug('Start preProcessor function.')
run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path)
_LOGGER.debug('End preProcessor function.')
_LOGGER.debug('Start solver function.')
output = run_thread(solver.solve, (custom_config,), None)
with open(simulation_log_path, 'a') as log_file:
log_file.write(output)
_LOGGER.debug('End solver function.')
with open(simulation_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
def postprocess(simulation_dir, params):
'''
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
of valid value
@raise ServiceError: if an error occurs while launching the post-processing
'''
signature = __name__ + '.postprocess()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', PostprocessingParameters, False)
helper.check_type_value(params.irf, 'params.irf', list, False)
for irf_item in params.irf:
helper.check_not_none_nor_empty(irf_item, 'irf_item')
helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
for kochin_function_item in params.kochin_function:
helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
for elevation_item in params.free_surface_elevation:
helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
try:
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_postprocessing_section(params, hdf5_data)
# Launch postProcessor
postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : 328,
'GREEN_TABULATION_NUMZ' : 46,
'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
'USE_ODE_INFLUENCE_COEFFICIENTS': False,
'USE_HIGHER_ORDER' : False,
'NUM_PANEL_HIGHER_ORDER' : 1,
'B_SPLINE_ORDER': 1,
'USE_DIPOLES_IMPLEMENTATION': False,
'THIN_PANELS': [-1],
'COMPUTE_DRIFT_FORCES' : False,
'COMPUTE_YAW_MOMENT': False,
'REMOVE_IRREGULAR_FREQUENCIES' : False
}
_LOGGER.debug('Start postProcessor function.')
run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path)
_LOGGER.debug('End postProcessor in subprocess.')
with open(postprocessing_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
def visualize(simulation_dir):
'''
Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if an error occurs while launching ParaView
'''
signature = __name__ + '.visualize()'
helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
try:
# Filter files to be opened in ParaView
files = []
for f in os.listdir(os.path.join(simulation_dir, 'results')):
for ext in VISUALIZATION_FILE_EXTENSIONS:
if fnmatch.fnmatch(f, '*.' + ext):
files.append(os.path.join(simulation_dir, 'results', f))
# Check if there's tec/vtk/stl file to visualize
if len(files) == 0:
raise ServiceError('There is no accepted file to visualize.')
_LOGGER.debug('List of files to load:')
_LOGGER.debug(str(files))
# Prepare script to run by ParaView
paraview_script = os.path.join(os.path.join(simulation_dir, 'results'), 'load_data.py')
prepare_paraview_script(paraview_script, files)
# Launch ParaView without waiting for the ParaView to exit
_LOGGER.debug('Start launching ParaView in subprocess.')
subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script + ''])
_LOGGER.debug('End launching ParaView in subprocess.')
helper.log_exit(_LOGGER, signature, None)
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
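# Hedged sketch (illustrative only) of the call chain a controller would typically
# use around this module: one directory per run, then simulate, post-process and
# visualize against it. 'params' and 'post_params' are assumed to be fully
# populated SimulationParameters / PostprocessingParameters instances.
def _example_simulation_pipeline(params, post_params):
    simulation_dir = prepare_dir('simulation_')
    simulation_log = simulate(simulation_dir, params)
    postprocessing_log = postprocess(simulation_dir, post_params)
    visualize(simulation_dir)
    return simulation_log, postprocessing_log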
def prepare_paraview_script(script_path, files):
'''
Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
@param files: a list of data files path
@raise Exception: to its caller if any error occurs
'''
# Since this is an inner function, no entrance/exit information is logged.
with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin:
with open(script_path, 'w') as fout:
for line in fin.readlines():
fout.write(line.rstrip().replace('<parameter_files>', str(files)) + '\n')
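# Hedged illustration (only the '<parameter_files>' token is taken from the code
# above): the substitution rewrites any occurrence of that literal token with the
# Python repr of the file list. The template line used here is a hypothetical
# example, not the real PARAVIEW_SCRIPT_TEMPLATE content.
def _example_template_substitution():
    template_line = 'files = <parameter_files>'
    files = ['/path/to/results/WaveField.tec']   # hypothetical
    # -> "files = ['/path/to/results/WaveField.tec']\n"
    return template_line.rstrip().replace('<parameter_files>', str(files)) + '\n'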
# From http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
class Silence:
"""
Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Example usage
with Silence(stderr='output.txt', mode='a'):
... # appending to existing file
... print >> sys.stderr, "Hello from stderr"
... print "Stdout redirected to os.devnull"
=== contents of 'output.txt' ===
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
"""
Initialize
Args:
self: The class itself
stdout: the descriptor or file name where to redirect stdout
stderr: the descriptor or file name where to redirect stderr
mode: the output descriptor or file mode
"""
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
def __enter__(self):
"""
Enter the context
Args:
self: The class itself
"""
import sys
self.sys = sys
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
# flush any pending output
for s in saved_streams: s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = map(os.fdopen, fds, ['w']*2, [0]*2)
else: null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
map(os.dup2, null_fds, fds)
def __exit__(self, *args):
"""
Exit the context
Args:
self: The class itself
args: other arguments
"""
sys = self.sys
# flush any pending output
for s in self.saved_streams: s.flush()
# restore original streams and file descriptors
map(os.dup2, self.saved_fds, self.fds)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams: s.close()
for fd in self.saved_fds: os.close(fd)
return False
def wrapper_io(func, fd, args, return_dict):
"""
Run a function, optionally redirecting its output to a file
Args:
func: A python function to run
fd: path of the file to redirect output to, or None to leave output unchanged
args: A tuple containing the arguments for the function
return_dict: Dictionary where to put the result of the function
"""
return_dict["output"] = ''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if fd:
with Silence(stdout=fd, stderr=os.devnull, mode='a'):
return_dict["output"] = func(*args)
else:
return_dict["output"] = func(*args)
# MASKED: run_thread function (lines 470-487)
def writeline_if_not_none(fout, data):
'''
Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line
'''
# Since this is an inner function, no entrance/exit information is logged.
if data is not None:
fout.write(str(data) + '\n')
def prepare_dir(prefix):
'''
Prepare a working directory: a sub-directory of USER_DATA_DIRECTORY whose name is the given
prefix followed by the current timestamp and a uuid suffix.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if any error occurred when preparing the directory
'''
signature = __name__ + '.prepare_dir()'
helper.log_entrance(_LOGGER, signature, {'prefix': prefix})
# Checking parameters
helper.check_not_none_nor_empty(prefix, 'prefix')
try:
# Create a directory for this run (sub-directory name in format <prefix>YYYYMMDDhhmmss_<uuid hex>)
# The uuid suffix keeps directories distinct even for concurrent requests within the same second.
run_dir = os.path.join(USER_DATA_DIRECTORY, prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex)
os.makedirs(run_dir)
helper.log_exit(_LOGGER, signature, [run_dir])
return run_dir
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
|
def run_thread(func, args, fd):
"""
Run a python function in a separate process and wait for it to complete.
Redirect its output to the file given by fd.
Args:
func: A python function to run
args: A tuple containing the arguments for the function
fd: path of the file to redirect output to, or None to leave output unchanged
"""
manager = Manager()
return_dict = manager.dict()
p = Process(target=wrapper_io, args=(func, fd, args, return_dict))
p.start()
p.join()
return return_dict["output"]
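# Hedged usage sketch (illustrative only): run a module-level worker through
# run_thread. With fd=None the worker's output streams are left untouched and only
# its return value is collected via the shared manager dict; passing a log file
# path instead would silence the worker and append its stdout/stderr to that file.
def _example_worker(x, y):
    return 'sum=%d' % (x + y)
def _example_run_thread():
    return run_thread(_example_worker, (2, 3), None)   # -> 'sum=5'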
| 470 | 487 |
# -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
"""
__author__ = "caoweiquan322, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2015 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process, Manager
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
import fnmatch
import h5py
# This class represents parameters used in the meshing process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
'infile outfile maxh minh fineness grading usetolerance tolerance')
# This class represents parameters used in the simulation process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
'min_wave_frequencies max_wave_frequencies wave_directions ' +
'max_wave_direction min_wave_directions floating_bodies ' +
'indiq_solver ires tol_gmres max_iterations save_potential ' +
'green_tabulation_numx green_tabulation_numz ' +
'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
'use_higher_order num_panel_higher_order b_spline_order ' +
'use_dipoles_implementation thin_panels compute_drift_forces ' +
'compute_yaw_moment remove_irregular_frequencies')
# This class represents a floating body used in the SimulationParameters.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
'force_in_z_direction moment_cdg_force_in_x_direction ' +
'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
'additional_info_lines')
# This class represents parameters used in the post-processing.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
'kochin_function free_surface_elevation')
# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'
# The pre-defined stdout log file name.
_LOG_FILE_NAME = 'log.txt'
# The logger object for logging.
_LOGGER = logging.getLogger(__name__)
class ServiceError(Exception):
'''
This exception indicates a service error.
It will be raised by methods of this module.
'''
pass
def generate_mesh(meshing_dir, params):
'''
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
@raise ServiceError: if an error occurs while generating the mesh
'''
signature = __name__ + '.generate_mesh()'
helper.log_entrance(_LOGGER, signature,
{'meshing_dir': meshing_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
helper.check_is_directory(meshing_dir, 'meshing_dir')
helper.check_type_value(params, 'params', MeshingParameters, False)
helper.check_not_none_nor_empty(params.infile, 'params.infile')
helper.check_is_file(params.infile, 'params.infile')
helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
helper.check_not_none_nor_empty(params.minh, 'params.minh')
helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
helper.check_not_none_nor_empty(params.grading, 'params.grading')
helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
if params.usetolerance == '1':
helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
try:
config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
# Generate config.txt according to given parameters
with open(config_file_path, 'w') as f:
f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
# Launch mesh generator
with open(log_file_path, 'w') as log_file:
_LOGGER.debug('Start mesh generator in subprocess.')
subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
_LOGGER.debug('End mesh generator in subprocess.')
# Read and return the log file content
with open(log_file_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
def simulate(simulation_dir, params):
'''
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
'''
signature = __name__ + '.simulate()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', SimulationParameters, False)
helper.check_not_none_nor_empty(params.rho, 'params.rho')
helper.check_not_none_nor_empty(params.g, 'params.g')
helper.check_not_none_nor_empty(params.depth, 'params.depth')
helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
helper.check_not_none_nor_empty(params.ires, 'params.ires')
helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
if params.floating_bodies is not None:
for body in params.floating_bodies:
helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
helper.check_not_none_nor_empty(body.points, 'body.points')
helper.check_not_none_nor_empty(body.panels, 'body.panels')
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
try:
# Write the hdf5 inputs according to given parameters
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_calculations(params, hdf5_data)
# Launch preProcessor and Solver
# A prepared 'results' folder is necessary for the Nemoh software suite
os.mkdir(os.path.join(simulation_dir, 'results'))
simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
'B_SPLINE_ORDER': int(params.b_spline_order),
'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
}
_LOGGER.debug('Start preProcessor function.')
run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path)
_LOGGER.debug('End preProcessor function.')
_LOGGER.debug('Start solver function.')
output = run_thread(solver.solve, (custom_config,), None)
with open(simulation_log_path, 'a') as log_file:
log_file.write(output)
_LOGGER.debug('End solver function.')
with open(simulation_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
def postprocess(simulation_dir, params):
'''
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
of valid value
@raise ServiceError: if an error occurs while launching the post-processing
'''
signature = __name__ + '.postprocess()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', PostprocessingParameters, False)
helper.check_type_value(params.irf, 'params.irf', list, False)
for irf_item in params.irf:
helper.check_not_none_nor_empty(irf_item, 'irf_item')
helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
for kochin_function_item in params.kochin_function:
helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
for elevation_item in params.free_surface_elevation:
helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
try:
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_postprocessing_section(params, hdf5_data)
# Launch postProcessor
postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : 328,
'GREEN_TABULATION_NUMZ' : 46,
'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
'USE_ODE_INFLUENCE_COEFFICIENTS': False,
'USE_HIGHER_ORDER' : False,
'NUM_PANEL_HIGHER_ORDER' : 1,
'B_SPLINE_ORDER': 1,
'USE_DIPOLES_IMPLEMENTATION': False,
'THIN_PANELS': [-1],
'COMPUTE_DRIFT_FORCES' : False,
'COMPUTE_YAW_MOMENT': False,
'REMOVE_IRREGULAR_FREQUENCIES' : False
}
_LOGGER.debug('Start postProcessor function.')
run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path)
_LOGGER.debug('End postProcessor in subprocess.')
with open(postprocessing_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
def visualize(simulation_dir):
'''
Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if an error occurs while launching ParaView
'''
signature = __name__ + '.visualize()'
helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
try:
# Filter files to be opened in ParaView
files = []
for f in os.listdir(os.path.join(simulation_dir, 'results')):
for ext in VISUALIZATION_FILE_EXTENSIONS:
if fnmatch.fnmatch(f, '*.' + ext):
files.append(os.path.join(simulation_dir, 'results', f))
# Check if there's tec/vtk/stl file to visualize
if len(files) == 0:
raise ServiceError('There is no accepted file to visualize.')
_LOGGER.debug('List of files to load:')
_LOGGER.debug(str(files))
# Prepare script to run by ParaView
paraview_script = os.path.join(os.path.join(simulation_dir, 'results'), 'load_data.py')
prepare_paraview_script(paraview_script, files)
# Launch ParaView without waiting for the ParaView to exit
_LOGGER.debug('Start launching ParaView in subprocess.')
subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script + ''])
_LOGGER.debug('End launching ParaView in subprocess.')
helper.log_exit(_LOGGER, signature, None)
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
def prepare_paraview_script(script_path, files):
'''
Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
@param files: a list of data files path
@raise Exception: to its caller if any error occurs
'''
# Since this is an inner function, no entrance/exit information is logged.
with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin:
with open(script_path, 'w') as fout:
for line in fin.readlines():
fout.write(line.rstrip().replace('<parameter_files>', str(files)) + '\n')
# From http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
class Silence:
"""
Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Example usage
with Silence(stderr='output.txt', mode='a'):
... # appending to existing file
... print >> sys.stderr, "Hello from stderr"
... print "Stdout redirected to os.devnull"
=== contents of 'output.txt' ===
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
"""
Initialize
Args:
self: The class itself
stdout: the descriptor or file name where to redirect stdout
stderr: the descriptor or file name where to redirect stderr
mode: the output descriptor or file mode
"""
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
def __enter__(self):
"""
Enter the context
Args:
self: The class itself
"""
import sys
self.sys = sys
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
# flush any pending output
for s in saved_streams: s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = map(os.fdopen, fds, ['w']*2, [0]*2)
else: null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
map(os.dup2, null_fds, fds)
def __exit__(self, *args):
"""
Exit the context
Args:
self: The class itself
args: other arguments
"""
sys = self.sys
# flush any pending output
for s in self.saved_streams: s.flush()
# restore original streams and file descriptors
map(os.dup2, self.saved_fds, self.fds)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams: s.close()
for fd in self.saved_fds: os.close(fd)
return False
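# Hedged usage sketch (illustrative only) for the Silence context manager above:
# discard stdout and append stderr to a log file while a noisy call runs. The log
# path is a hypothetical placeholder.
def _example_silence(log_path='/tmp/silence_example.log'):
    import sys
    with Silence(stdout=os.devnull, stderr=log_path, mode='a'):
        sys.stdout.write('discarded\n')
        sys.stderr.write('appended to the log file\n')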
def wrapper_io(func, fd, args, return_dict):
"""
Run a function, optionally redirecting its output to a file
Args:
func: A python function to run
fd: path of the file to redirect output to, or None to leave output unchanged
args: A tuple containing the arguments for the function
return_dict: Dictionary where to put the result of the function
"""
return_dict["output"] = ''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if fd:
with Silence(stdout=fd, stderr=os.devnull, mode='a'):
return_dict["output"] = func(*args)
else:
return_dict["output"] = func(*args)
def run_thread(func, args, fd):
"""
Run a python function in a separate process and wait for it to complete.
Redirect its output to the file given by fd.
Args:
func: A python function to run
args: A tuple containing the arguments for the function
fd: path of the file to redirect output to, or None to leave output unchanged
"""
manager = Manager()
return_dict = manager.dict()
p = Process(target=wrapper_io, args=(func, fd, args, return_dict))
p.start()
p.join()
return return_dict["output"]
def writeline_if_not_none(fout, data):
'''
Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line
'''
# Since this is an inner function, no entrance/exit information is logged.
if data is not None:
fout.write(str(data) + '\n')
def prepare_dir(prefix):
'''
Prepare a working directory: a sub-directory of USER_DATA_DIRECTORY whose name is the given
prefix followed by the current timestamp and a uuid suffix.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if any error occurred when preparing the directory
'''
signature = __name__ + '.prepare_dir()'
helper.log_entrance(_LOGGER, signature, {'prefix': prefix})
# Checking parameters
helper.check_not_none_nor_empty(prefix, 'prefix')
try:
# Create a directory for this run (sub-directory name in format <prefix>YYYYMMDDhhmmss_<uuid hex>)
# The uuid suffix keeps directories distinct even for concurrent requests within the same second.
run_dir = os.path.join(USER_DATA_DIRECTORY, prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex)
os.makedirs(run_dir)
helper.log_exit(_LOGGER, signature, [run_dir])
return run_dir
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
|
__enter__
|
Enter the context
Args:
self: The class itself
|
# -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
"""
__author__ = "caoweiquan322, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2015 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process, Manager
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
import fnmatch
import h5py
# This class represents parameters used in the meshing process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
'infile outfile maxh minh fineness grading usetolerance tolerance')
# This class represents parameters used in the simulation process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
'min_wave_frequencies max_wave_frequencies wave_directions ' +
'max_wave_direction min_wave_directions floating_bodies ' +
'indiq_solver ires tol_gmres max_iterations save_potential ' +
'green_tabulation_numx green_tabulation_numz ' +
'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
'use_higher_order num_panel_higher_order b_spline_order ' +
'use_dipoles_implementation thin_panels compute_drift_forces ' +
'compute_yaw_moment remove_irregular_frequencies')
# This class represents a floating body used in the SimulationParameters.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
'force_in_z_direction moment_cdg_force_in_x_direction ' +
'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
'additional_info_lines')
# This class represents parameters used in the post-processing.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
'kochin_function free_surface_elevation')
# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'
# The pre-defined stdout log file name.
_LOG_FILE_NAME = 'log.txt'
# The logger object for logging.
_LOGGER = logging.getLogger(__name__)
class ServiceError(Exception):
'''
This exception indicates a service error.
It will be raised by methods of this module.
'''
pass
def generate_mesh(meshing_dir, params):
'''
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
@raise ServiceError: if an error occurs while generating the mesh
'''
signature = __name__ + '.generate_mesh()'
helper.log_entrance(_LOGGER, signature,
{'meshing_dir': meshing_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
helper.check_is_directory(meshing_dir, 'meshing_dir')
helper.check_type_value(params, 'params', MeshingParameters, False)
helper.check_not_none_nor_empty(params.infile, 'params.infile')
helper.check_is_file(params.infile, 'params.infile')
helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
helper.check_not_none_nor_empty(params.minh, 'params.minh')
helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
helper.check_not_none_nor_empty(params.grading, 'params.grading')
helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
if params.usetolerance == '1':
helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
try:
config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
# Generate config.txt according to given parameters
with open(config_file_path, 'w') as f:
f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
# Launch mesh generator
with open(log_file_path, 'w') as log_file:
_LOGGER.debug('Start mesh generator in subprocess.')
subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
_LOGGER.debug('End mesh generator in subprocess.')
# Read and return the log file content
with open(log_file_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
def simulate(simulation_dir, params):
'''
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
'''
signature = __name__ + '.simulate()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', SimulationParameters, False)
helper.check_not_none_nor_empty(params.rho, 'params.rho')
helper.check_not_none_nor_empty(params.g, 'params.g')
helper.check_not_none_nor_empty(params.depth, 'params.depth')
helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
helper.check_not_none_nor_empty(params.ires, 'params.ires')
helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
if params.floating_bodies is not None:
for body in params.floating_bodies:
helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
helper.check_not_none_nor_empty(body.points, 'body.points')
helper.check_not_none_nor_empty(body.panels, 'body.panels')
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
try:
# Write the hdf5 inputs according to given parameters
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_calculations(params, hdf5_data)
# Launch preProcessor and Solver
# A prepared 'results' folder is necessary for the Nemoh software suite
os.mkdir(os.path.join(simulation_dir, 'results'))
simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
'B_SPLINE_ORDER': int(params.b_spline_order),
'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
}
_LOGGER.debug('Start preProcessor function.')
run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path)
_LOGGER.debug('End preProcessor function.')
_LOGGER.debug('Start solver function.')
output = run_thread(solver.solve, (custom_config,), None)
with open(simulation_log_path, 'a') as log_file:
log_file.write(output)
_LOGGER.debug('End solver function.')
with open(simulation_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
def postprocess(simulation_dir, params):
'''
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
of valid value
@raise ServiceError: if an error occurs while launching the post-processing
'''
signature = __name__ + '.postprocess()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', PostprocessingParameters, False)
helper.check_type_value(params.irf, 'params.irf', list, False)
for irf_item in params.irf:
helper.check_not_none_nor_empty(irf_item, 'irf_item')
helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
for kochin_function_item in params.kochin_function:
helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
for elevation_item in params.free_surface_elevation:
helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
try:
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_postprocessing_section(params, hdf5_data)
# Launch postProcessor
postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : 328,
'GREEN_TABULATION_NUMZ' : 46,
'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
'USE_ODE_INFLUENCE_COEFFICIENTS': False,
'USE_HIGHER_ORDER' : False,
'NUM_PANEL_HIGHER_ORDER' : 1,
'B_SPLINE_ORDER': 1,
'USE_DIPOLES_IMPLEMENTATION': False,
'THIN_PANELS': [-1],
'COMPUTE_DRIFT_FORCES' : False,
'COMPUTE_YAW_MOMENT': False,
'REMOVE_IRREGULAR_FREQUENCIES' : False
}
_LOGGER.debug('Start postProcessor function.')
run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path)
_LOGGER.debug('End postProcessor in subprocess.')
with open(postprocessing_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
def visualize(simulation_dir):
'''
Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if an error occurs while launching ParaView
'''
signature = __name__ + '.visualize()'
helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
try:
# Filter files to be opened in ParaView
files = []
for f in os.listdir(os.path.join(simulation_dir, 'results')):
for ext in VISUALIZATION_FILE_EXTENSIONS:
if fnmatch.fnmatch(f, '*.' + ext):
files.append(os.path.join(simulation_dir, 'results', f))
# Check if there's tec/vtk/stl file to visualize
if len(files) == 0:
raise ServiceError('There is no accepted file to visualize.')
_LOGGER.debug('List of files to load:')
_LOGGER.debug(str(files))
# Prepare script to run by ParaView
paraview_script = os.path.join(os.path.join(simulation_dir, 'results'), 'load_data.py')
prepare_paraview_script(paraview_script, files)
# Launch ParaView without waiting for the ParaView to exit
_LOGGER.debug('Start launching ParaView in subprocess.')
subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script + ''])
_LOGGER.debug('End launching ParaView in subprocess.')
helper.log_exit(_LOGGER, signature, None)
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
def prepare_paraview_script(script_path, files):
'''
Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
@param files: a list of data files path
@raise Exception: to its caller if any error occurs
'''
# Since this is an inner function, no entrance/exit information is logged.
with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin:
with open(script_path, 'w') as fout:
for line in fin.readlines():
fout.write(line.rstrip().replace('<parameter_files>', str(files)) + '\n')
# From http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
class Silence:
"""
Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Example usage
with Silence(stderr='output.txt', mode='a'):
... # appending to existing file
... print >> sys.stderr, "Hello from stderr"
... print "Stdout redirected to os.devnull"
=== contents of 'output.txt' ===
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
"""
Initialize
Args:
self: The class itself
stdout: the descriptor or file name where to redirect stdout
stderr: the descriptor or file name where to redirect stderr
mode: the output descriptor or file mode
"""
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
# MASKED: __enter__ function (lines 403-429)
def __exit__(self, *args):
"""
Exit the context
Args:
self: The class itself
args: other arguments
"""
sys = self.sys
# flush any pending output
for s in self.saved_streams: s.flush()
# restore original streams and file descriptors
map(os.dup2, self.saved_fds, self.fds)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams: s.close()
for fd in self.saved_fds: os.close(fd)
return False
def wrapper_io(func, fd, args, return_dict):
"""
Run a function while redirecting its output to a file descriptor
Args:
func: A python function to run
fd: a file descriptor
args: A tuple containing the arguments for the function
return_dict: Dictionary where to put the result of the function
"""
return_dict["output"] = ''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if fd:
with Silence(stdout=fd, stderr=os.devnull, mode='a'):
return_dict["output"] = func(*args)
else:
return_dict["output"] = func(*args)
def run_thread(func, args, fd):
"""
Run a python function in a separate process and wait for it to complete.
Redirect its output to fd, if provided.
Args:
func: A python function to run
args: A tuple containing the arguments for the function
fd: the file (name or descriptor) to redirect output to, or None for no redirection
"""
manager = Manager()
return_dict = manager.dict()
p = Process(target=wrapper_io, args=(func, fd, args, return_dict))
p.start()
p.join()
return return_dict["output"]
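# A minimal usage sketch for run_thread. Despite its name, the callable runs in a separate
# process (multiprocessing.Process); its console output is appended to the given file, and its
# return value travels back through the Manager dict. The worker and log path are hypothetical.
def _demo_add(x, y):
    return 'sum=%d' % (x + y)

if __name__ == '__main__':
    # Would print 'sum=5'; anything _demo_add wrote to stdout/stderr would land in the log file.
    print(run_thread(_demo_add, (2, 3), '/tmp/demo_log.txt'))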
def writeline_if_not_none(fout, data):
'''
Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line
'''
# Since this is an inner function, no entrance/exit information is logged.
if data is not None:
fout.write(str(data) + '\n')
def prepare_dir(prefix):
'''
Prepare a directory. The directory will be a sub-directory of USER_DATA_DIRECTORY, named with the given
prefix followed by the current timestamp and a uuid suffix.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if any error occurred when preparing the directory
'''
signature = __name__ + '.prepare_dir()'
helper.log_entrance(_LOGGER, signature, {'prefix': prefix})
# Checking parameters
helper.check_not_none_nor_empty(prefix, 'prefix')
try:
# Create a directory for this run (sub-directory name in format <prefix>YYYYMMDDhhmmss_<uuid hex>)
# The uuid suffix allows multiple concurrent requests within the same second.
run_dir = os.path.join(USER_DATA_DIRECTORY, prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex)
os.makedirs(run_dir)
helper.log_exit(_LOGGER, signature, [run_dir])
return run_dir
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
|
def __enter__(self):
"""
Enter the context
Args:
self: The class itself
"""
import sys
self.sys = sys
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
# flush any pending output
for s in saved_streams: s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = map(os.fdopen, fds, ['w']*2, [0]*2)
else: null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
map(os.dup2, null_fds, fds)
| 403 | 429 |
# -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
"""
__author__ = "caoweiquan322, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2015 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process, Manager
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
import fnmatch
import h5py
# This class represents parameters used in the meshing process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
'infile outfile maxh minh fineness grading usetolerance tolerance')
# This class represents parameters used in the simulation process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
'min_wave_frequencies max_wave_frequencies wave_directions ' +
'max_wave_direction min_wave_directions floating_bodies ' +
'indiq_solver ires tol_gmres max_iterations save_potential ' +
'green_tabulation_numx green_tabulation_numz ' +
'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
'use_higher_order num_panel_higher_order b_spline_order ' +
'use_dipoles_implementation thin_panels compute_drift_forces ' +
'compute_yaw_moment remove_irregular_frequencies')
# This class represents a floating body used in the SimulationParameters.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
'force_in_z_direction moment_cdg_force_in_x_direction ' +
'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
'additional_info_lines')
# This class represents parameters used in the post-processing.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
'kochin_function free_surface_elevation')
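# The parameter holders above are plain namedtuples; the validation helpers used below expect
# their fields to be non-empty strings (numeric conversion happens later). A construction
# sketch with hypothetical values:
_EXAMPLE_MESHING_PARAMS = MeshingParameters(
    infile='/tmp/example.stl', outfile='example_mesh.dat',   # hypothetical paths
    maxh='0.5', minh='0.05', fineness='0.5', grading='0.3',
    usetolerance='0', tolerance=None)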
# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'
# The pre-defined stdout log file name.
_LOG_FILE_NAME = 'log.txt'
# The logger object for logging.
_LOGGER = logging.getLogger(__name__)
class ServiceError(Exception):
'''
This exception indicates a service error.
It will be raised by methods of this module.
'''
pass
def generate_mesh(meshing_dir, params):
'''
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
@raise ServiceError: if error occurs during generating mesh
'''
signature = __name__ + '.generate_mesh()'
helper.log_entrance(_LOGGER, signature,
{'meshing_dir': meshing_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
helper.check_is_directory(meshing_dir, 'meshing_dir')
helper.check_type_value(params, 'params', MeshingParameters, False)
helper.check_not_none_nor_empty(params.infile, 'params.infile')
helper.check_is_file(params.infile, 'params.infile')
helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
helper.check_not_none_nor_empty(params.minh, 'params.minh')
helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
helper.check_not_none_nor_empty(params.grading, 'params.grading')
helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
if params.usetolerance == '1':
helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
try:
config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
# Generate config.txt according to given parameters
with open(config_file_path, 'w') as f:
f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
# Launch mesh generator
with open(log_file_path, 'w') as log_file:
_LOGGER.debug('Start mesh generator in subprocess.')
subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
_LOGGER.debug('End mesh generator in subprocess.')
# Read and return the log file content
with open(log_file_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
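# A standalone sketch of the config.txt rendering used by generate_mesh above. It relies on the
# namedtuple's _asdict() (the vars(params) call above behaves the same way on Python 2.7) and
# emits one 'name: value' line per field that is not None.
def _render_mesh_config(params):
    return '\n'.join('%s: %s' % (name, value)
                     for name, value in params._asdict().items()
                     if value is not None)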
def simulate(simulation_dir, params):
'''
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
'''
signature = __name__ + '.simulate()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', SimulationParameters, False)
helper.check_not_none_nor_empty(params.rho, 'params.rho')
helper.check_not_none_nor_empty(params.g, 'params.g')
helper.check_not_none_nor_empty(params.depth, 'params.depth')
helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
helper.check_not_none_nor_empty(params.ires, 'params.ires')
helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
if params.floating_bodies is not None:
for body in params.floating_bodies:
helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
helper.check_not_none_nor_empty(body.points, 'body.points')
helper.check_not_none_nor_empty(body.panels, 'body.panels')
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
try:
# Write the hdf5 inputs according to given parameters
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_calculations(params, hdf5_data)
# Launch preProcessor and Solver
# A prepared 'results' folder is necessary for the Nemoh software suite
os.mkdir(os.path.join(simulation_dir, 'results'))
simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
'B_SPLINE_ORDER': int(params.b_spline_order),
'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
}
_LOGGER.debug('Start preProcessor function.')
run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path)
_LOGGER.debug('End preProcessor function.')
_LOGGER.debug('Start solver function.')
output = run_thread(solver.solve, (custom_config,), None)
with open(simulation_log_path, 'a') as log_file:
log_file.write(output)
_LOGGER.debug('End solver function.')
with open(simulation_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
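# The SimulationParameters fields arrive as strings, hence the int(...) / bool(int(...))
# conversions above when building custom_config. A minimal sketch of those conversions
# (helper names are hypothetical):
def _as_flag(value):
    # '0' -> False, '1' (or any non-zero numeric string) -> True
    return bool(int(value))

def _as_int_list(value):
    # whitespace-separated numeric string, e.g. '1 0 -1' -> [1, 0, -1]
    return [int(i) for i in value.split()]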
def postprocess(simulation_dir, params):
'''
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
of valid value
@raise ServiceError: if error occurs during launching the post-processing
'''
signature = __name__ + '.postprocess()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', PostprocessingParameters, False)
helper.check_type_value(params.irf, 'params.irf', list, False)
for irf_item in params.irf:
helper.check_not_none_nor_empty(irf_item, 'irf_item')
helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
for kochin_function_item in params.kochin_function:
helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
for elevation_item in params.free_surface_elevation:
helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
try:
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_postprocessing_section(params, hdf5_data)
# Launch postProcessor
postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : 328,
'GREEN_TABULATION_NUMZ' : 46,
'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
'USE_ODE_INFLUENCE_COEFFICIENTS': False,
'USE_HIGHER_ORDER' : False,
'NUM_PANEL_HIGHER_ORDER' : 1,
'B_SPLINE_ORDER': 1,
'USE_DIPOLES_IMPLEMENTATION': False,
'THIN_PANELS': [-1],
'COMPUTE_DRIFT_FORCES' : False,
'COMPUTE_YAW_MOMENT': False,
'REMOVE_IRREGULAR_FREQUENCIES' : False
}
_LOGGER.debug('Start postProcessor function.')
run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path)
_LOGGER.debug('End postProcessor in subprocess.')
with open(postprocessing_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
def visualize(simulation_dir):
'''
Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if error occurs during launching the ParaView
'''
signature = __name__ + '.visualize()'
helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
try:
# Filter files to be opened in ParaView
files = []
for f in os.listdir(os.path.join(simulation_dir, 'results')):
for ext in VISUALIZATION_FILE_EXTENSIONS:
if fnmatch.fnmatch(f, '*.' + ext):
files.append(os.path.join(simulation_dir, 'results', f))
# Check if there's any tec/vtk/stl file to visualize
if len(files) == 0:
raise ServiceError('There is no accepted file to visualize.')
_LOGGER.debug('List of files to load:')
_LOGGER.debug(str(files))
# Prepare script to run by ParaView
paraview_script = os.path.join(os.path.join(simulation_dir, 'results'), 'load_data.py')
prepare_paraview_script(paraview_script, files)
# Launch ParaView without waiting for the ParaView to exit
_LOGGER.debug('Start launching ParaView in subprocess.')
subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script])
_LOGGER.debug('End launching ParaView in subprocess.')
helper.log_exit(_LOGGER, signature, None)
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
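# A standalone sketch of the extension filter visualize() applies to the 'results' listing;
# the names and extension list below are hypothetical stand-ins for os.listdir(...) and
# VISUALIZATION_FILE_EXTENSIONS.
def _filter_visualizable(names, extensions):
    import fnmatch
    return [name for name in names
            for ext in extensions
            if fnmatch.fnmatch(name, '*.' + ext)]
# e.g. _filter_visualizable(['irf.tec', 'log.txt'], ['tec', 'vtk', 'stl']) -> ['irf.tec']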
def prepare_paraview_script(script_path, files):
'''
Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
@param files: a list of data file paths
@raise Exception: to its caller if any error occurs
'''
# Since this is an inner function, no entrance/exit information is logged.
with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin:
with open(script_path, 'w') as fout:
for line in fin.readlines():
fout.write(line.rstrip().replace('<parameter_files>', str(files)) + '\n')
# From http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
class Silence:
"""
Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Example usage
with Silence(stderr='output.txt', mode='a'):
... # appending to existing file
... print >> sys.stderr, "Hello from stderr"
... print "Stdout redirected to os.devnull"
=== contents of 'output.txt' ===
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
"""
Initialize
Args:
self: The class itself
stdout: the descriptor or file name where to redirect stdout
stderr: the descriptor or file name where to redirect stderr
mode: the mode used to open the output file(s)
"""
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
def __enter__(self):
"""
Enter the context
Args:
self: The class itself
"""
import sys
self.sys = sys
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
# flush any pending output
for s in saved_streams: s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = map(os.fdopen, fds, ['w']*2, [0]*2)
else: null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
map(os.dup2, null_fds, fds)
def __exit__(self, *args):
"""
Exit the context
Args:
self: The class itself
args: other arguments
"""
sys = self.sys
# flush any pending output
for s in self.saved_streams: s.flush()
# restore original streams and file descriptors
map(os.dup2, self.saved_fds, self.fds)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams: s.close()
for fd in self.saved_fds: os.close(fd)
return False
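# A minimal usage sketch for the Silence context manager defined above; the log file name is
# hypothetical. Anything written to stdout inside the block (including by C extensions, since
# the redirection happens at the file-descriptor level) ends up in the named file.
def _demo_silence(log_path='quiet_run.log'):
    with Silence(stdout=log_path, stderr=os.devnull, mode='w'):
        print('this line goes to %s, not to the console' % log_path)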
def wrapper_io(func, fd, args, return_dict):
"""
Run a function while redirecting its output to a file descriptor
Args:
func: A python function to run
fd: a file descriptor
args: A tuple containing the arguments for the function
return_dict: Dictionary where to put the result of the function
"""
return_dict["output"] = ''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if fd:
with Silence(stdout=fd, stderr=os.devnull, mode='a'):
return_dict["output"] = func(*args)
else:
return_dict["output"] = func(*args)
def run_thread(func, args, fd):
"""
Run a python function in a separate process and wait for it to complete.
Redirect its output to fd, if provided.
Args:
func: A python function to run
args: A tuple containing the arguments for the function
fd: the file (name or descriptor) to redirect output to, or None for no redirection
"""
manager = Manager()
return_dict = manager.dict()
p = Process(target=wrapper_io, args=(func, fd, args, return_dict))
p.start()
p.join()
return return_dict["output"]
def writeline_if_not_none(fout, data):
'''
Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line
'''
# Since this is an inner function, no entrance/exit information is logged.
if data is not None:
fout.write(str(data) + '\n')
def prepare_dir(prefix):
'''
Prepare a directory. The directory will be a sub-directory of USER_DATA_DIRECTORY, named with the given
prefix followed by the current timestamp and a uuid suffix.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if any error occurred when preparing the directory
'''
signature = __name__ + '.prepare_dir()'
helper.log_entrance(_LOGGER, signature, {'prefix': prefix})
# Checking parameters
helper.check_not_none_nor_empty(prefix, 'prefix')
try:
# Create a directory for this run (sub-directory name in format <prefix>YYYYMMDDhhmmss_<uuid hex>)
# The uuid suffix allows multiple concurrent requests within the same second.
run_dir = os.path.join(USER_DATA_DIRECTORY, prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex)
os.makedirs(run_dir)
helper.log_exit(_LOGGER, signature, [run_dir])
return run_dir
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
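# The run directory created by prepare_dir above is named <prefix><timestamp>_<uuid1 hex>, e.g.
# (hypothetical) USER_DATA_DIRECTORY/simulation_20150101120000_0a1b2c... A sketch of just the
# name construction:
def _example_run_dir_name(prefix='simulation_'):
    import time, uuid
    return prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex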
|
__exit__
|
Exit the context
Args:
self: The class itself
args: other arguments
|
# -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
"""
__author__ = "caoweiquan322, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2015 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process, Manager
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
import fnmatch
import h5py
# This class represents parameters used in the meshing process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
'infile outfile maxh minh fineness grading usetolerance tolerance')
# This class represents parameters used in the simulation process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
'min_wave_frequencies max_wave_frequencies wave_directions ' +
'max_wave_direction min_wave_directions floating_bodies ' +
'indiq_solver ires tol_gmres max_iterations save_potential ' +
'green_tabulation_numx green_tabulation_numz ' +
'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
'use_higher_order num_panel_higher_order b_spline_order ' +
'use_dipoles_implementation thin_panels compute_drift_forces ' +
'compute_yaw_moment remove_irregular_frequencies')
# This class represents a floating body used in the SimulationParameters.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
'force_in_z_direction moment_cdg_force_in_x_direction ' +
'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
'additional_info_lines')
# This class represents parameters used in the post-processing.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
'kochin_function free_surface_elevation')
# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'
# The pre-defined stdout log file name.
_LOG_FILE_NAME = 'log.txt'
# The logger object for logging.
_LOGGER = logging.getLogger(__name__)
class ServiceError(Exception):
'''
This exception indicates a service error.
It will be raised by methods of this module.
'''
pass
def generate_mesh(meshing_dir, params):
'''
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
@raise ServiceError: if error occurs during generating mesh
'''
signature = __name__ + '.generate_mesh()'
helper.log_entrance(_LOGGER, signature,
{'meshing_dir': meshing_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
helper.check_is_directory(meshing_dir, 'meshing_dir')
helper.check_type_value(params, 'params', MeshingParameters, False)
helper.check_not_none_nor_empty(params.infile, 'params.infile')
helper.check_is_file(params.infile, 'params.infile')
helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
helper.check_not_none_nor_empty(params.minh, 'params.minh')
helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
helper.check_not_none_nor_empty(params.grading, 'params.grading')
helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
if params.usetolerance == '1':
helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
try:
config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
# Generate config.txt according to given parameters
with open(config_file_path, 'w') as f:
f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
# Launch mesh generator
with open(log_file_path, 'w') as log_file:
_LOGGER.debug('Start mesh generator in subprocess.')
subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
_LOGGER.debug('End mesh generator in subprocess.')
# Read and return the log file content
with open(log_file_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
def simulate(simulation_dir, params):
'''
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
'''
signature = __name__ + '.simulate()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', SimulationParameters, False)
helper.check_not_none_nor_empty(params.rho, 'params.rho')
helper.check_not_none_nor_empty(params.g, 'params.g')
helper.check_not_none_nor_empty(params.depth, 'params.depth')
helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
helper.check_not_none_nor_empty(params.ires, 'params.ires')
helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
if params.floating_bodies is not None:
for body in params.floating_bodies:
helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
helper.check_not_none_nor_empty(body.points, 'body.points')
helper.check_not_none_nor_empty(body.panels, 'body.panels')
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
try:
# Write the hdf5 inputs according to given parameters
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_calculations(params, hdf5_data)
# Launch preProcessor and Solver
# A prepared 'results' folder is necessary for the Nemoh software suite
os.mkdir(os.path.join(simulation_dir, 'results'))
simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
'B_SPLINE_ORDER': int(params.b_spline_order),
'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
}
_LOGGER.debug('Start preProcessor function.')
run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path)
_LOGGER.debug('End preProcessor function.')
_LOGGER.debug('Start solver function.')
output = run_thread(solver.solve, (custom_config,), None)
with open(simulation_log_path, 'a') as log_file:
log_file.write(output)
_LOGGER.debug('End solver function.')
with open(simulation_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
def postprocess(simulation_dir, params):
'''
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
of valid value
@raise ServiceError: if error occurs during launching the post-processing
'''
signature = __name__ + '.postprocess()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', PostprocessingParameters, False)
helper.check_type_value(params.irf, 'params.irf', list, False)
for irf_item in params.irf:
helper.check_not_none_nor_empty(irf_item, 'irf_item')
helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
for kochin_function_item in params.kochin_function:
helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
for elevation_item in params.free_surface_elevation:
helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
try:
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_postprocessing_section(params, hdf5_data)
# Launch postProcessor
postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : 328,
'GREEN_TABULATION_NUMZ' : 46,
'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
'USE_ODE_INFLUENCE_COEFFICIENTS': False,
'USE_HIGHER_ORDER' : False,
'NUM_PANEL_HIGHER_ORDER' : 1,
'B_SPLINE_ORDER': 1,
'USE_DIPOLES_IMPLEMENTATION': False,
'THIN_PANELS': [-1],
'COMPUTE_DRIFT_FORCES' : False,
'COMPUTE_YAW_MOMENT': False,
'REMOVE_IRREGULAR_FREQUENCIES' : False
}
_LOGGER.debug('Start postProcessor function.')
run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path)
_LOGGER.debug('End postProcessor in subprocess.')
with open(postprocessing_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
def visualize(simulation_dir):
'''
Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if error occurs during launching the ParaView
'''
signature = __name__ + '.visualize()'
helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
try:
# Filter files to be opened in ParaView
files = []
for f in os.listdir(os.path.join(simulation_dir, 'results')):
for ext in VISUALIZATION_FILE_EXTENSIONS:
if fnmatch.fnmatch(f, '*.' + ext):
files.append(os.path.join(simulation_dir, 'results', f))
# Check if there's any tec/vtk/stl file to visualize
if len(files) == 0:
raise ServiceError('There is no accepted file to visualize.')
_LOGGER.debug('List of files to load:')
_LOGGER.debug(str(files))
# Prepare script to run by ParaView
paraview_script = os.path.join(os.path.join(simulation_dir, 'results'), 'load_data.py')
prepare_paraview_script(paraview_script, files)
# Launch ParaView without waiting for the ParaView to exit
_LOGGER.debug('Start launching ParaView in subprocess.')
subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script])
_LOGGER.debug('End launching ParaView in subprocess.')
helper.log_exit(_LOGGER, signature, None)
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
def prepare_paraview_script(script_path, files):
'''
Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
@param files: a list of data file paths
@raise Exception: to its caller if any error occurs
'''
# Since this is an inner function, no entrance/exit information is logged.
with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin:
with open(script_path, 'w') as fout:
for line in fin.readlines():
fout.write(line.rstrip().replace('<parameter_files>', str(files)) + '\n')
# From http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
class Silence:
"""
Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Example usage
with Silence(stderr='output.txt', mode='a'):
... # appending to existing file
... print >> sys.stderr, "Hello from stderr"
... print "Stdout redirected to os.devnull"
=== contents of 'output.txt' ===
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
"""
Initialize
Args:
self: The class itself
stdout: the descriptor or file name where to redirect stdout
stderr: the descriptor or file name where to redirect stderr
mode: the mode used to open the output file(s)
"""
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
def __enter__(self):
"""
Enter the context
Args:
self: The class itself
"""
import sys
self.sys = sys
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
# flush any pending output
for s in saved_streams: s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = map(os.fdopen, fds, ['w']*2, [0]*2)
else: null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
map(os.dup2, null_fds, fds)
# MASKED: __exit__ function (lines 431-447)
def wrapper_io(func, fd, args, return_dict):
"""
Run a function while redirecting its output to a file descriptor
Args:
func: A python function to run
fd: a file descriptor
args: A tuple containing the arguments for the function
return_dict: Dictionary where to put the result of the function
"""
return_dict["output"] = ''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if fd:
with Silence(stdout=fd, stderr=os.devnull, mode='a'):
return_dict["output"] = func(*args)
else:
return_dict["output"] = func(*args)
def run_thread(func, args, fd):
"""
Run a python function in a separate process and wait for it to complete.
Redirect its output to fd, if provided.
Args:
func: A python function to run
args: A tuple containing the arguments for the function
fd: the file (name or descriptor) to redirect output to, or None for no redirection
"""
manager = Manager()
return_dict = manager.dict()
p = Process(target=wrapper_io, args=(func, fd, args, return_dict))
p.start()
p.join()
return return_dict["output"]
def writeline_if_not_none(fout, data):
'''
Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line
'''
# Since this is a inner function, no entrance/exit information would be logged.
if data is not None:
fout.write(str(data) + '\n')
def prepare_dir(prefix):
'''
Prepare a directory. The directory will be a sub-directory of USER_DATA_DIRECTORY, named with the given
prefix followed by the current timestamp and a uuid suffix.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if any error occurred when preparing the directory
'''
signature = __name__ + '.prepare_dir()'
helper.log_entrance(_LOGGER, signature, {'prefix': prefix})
# Checking parameters
helper.check_not_none_nor_empty(prefix, 'prefix')
try:
# Create a directory for this run (sub-directory name in format <prefix>YYYYMMDDhhmmss_<uuid hex>)
# The uuid suffix allows multiple concurrent requests within the same second.
run_dir = os.path.join(USER_DATA_DIRECTORY, prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex)
os.makedirs(run_dir)
helper.log_exit(_LOGGER, signature, [run_dir])
return run_dir
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
|
def __exit__(self, *args):
"""
Exit the context
Args:
self: The class itself
args: other arguments
"""
sys = self.sys
# flush any pending output
for s in self.saved_streams: s.flush()
# restore original streams and file descriptors
map(os.dup2, self.saved_fds, self.fds)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams: s.close()
for fd in self.saved_fds: os.close(fd)
return False
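# A tiny check sketch for the restore behaviour implemented by __exit__ above: once the
# with-block finishes, sys.stdout/sys.stderr point back at the interpreter's originals
# (assumes the surrounding Silence class and a POSIX-like environment).
def _check_silence_restores():
    import sys
    with Silence():
        pass                               # stdout/stderr are routed to os.devnull in here
    return sys.stdout is sys.__stdout__    # True: __exit__ has put the originals back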
| 431 | 447 |
# -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
"""
__author__ = "caoweiquan322, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2015 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process, Manager
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
import fnmatch
import h5py
# This class represents parameters used in the meshing process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
'infile outfile maxh minh fineness grading usetolerance tolerance')
# This class represents parameters used in the simulation process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
'min_wave_frequencies max_wave_frequencies wave_directions ' +
'max_wave_direction min_wave_directions floating_bodies ' +
'indiq_solver ires tol_gmres max_iterations save_potential ' +
'green_tabulation_numx green_tabulation_numz ' +
'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
'use_higher_order num_panel_higher_order b_spline_order ' +
'use_dipoles_implementation thin_panels compute_drift_forces ' +
'compute_yaw_moment remove_irregular_frequencies')
# This class represents a floating body used in the SimulationParameters.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
'force_in_z_direction moment_cdg_force_in_x_direction ' +
'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
'additional_info_lines')
# This class represents parameters used in the post-processing.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
'kochin_function free_surface_elevation')
# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'
# The pre-defined stdout log file name.
_LOG_FILE_NAME = 'log.txt'
# The logger object for logging.
_LOGGER = logging.getLogger(__name__)
class ServiceError(Exception):
'''
This exception indicates a service error.
It will be raised by methods of this module.
'''
pass
def generate_mesh(meshing_dir, params):
'''
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
@raise ServiceError: if error occurs during generating mesh
'''
signature = __name__ + '.generate_mesh()'
helper.log_entrance(_LOGGER, signature,
{'meshing_dir': meshing_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
helper.check_is_directory(meshing_dir, 'meshing_dir')
helper.check_type_value(params, 'params', MeshingParameters, False)
helper.check_not_none_nor_empty(params.infile, 'params.infile')
helper.check_is_file(params.infile, 'params.infile')
helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
helper.check_not_none_nor_empty(params.minh, 'params.minh')
helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
helper.check_not_none_nor_empty(params.grading, 'params.grading')
helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
if params.usetolerance == '1':
helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
try:
config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
# Generate config.txt according to given parameters
with open(config_file_path, 'w') as f:
f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
# Launch mesh generator
with open(log_file_path, 'w') as log_file:
_LOGGER.debug('Start mesh generator in subprocess.')
subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
_LOGGER.debug('End mesh generator in subprocess.')
# Read and return the log file content
with open(log_file_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
def simulate(simulation_dir, params):
'''
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
'''
signature = __name__ + '.simulate()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', SimulationParameters, False)
helper.check_not_none_nor_empty(params.rho, 'params.rho')
helper.check_not_none_nor_empty(params.g, 'params.g')
helper.check_not_none_nor_empty(params.depth, 'params.depth')
helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
helper.check_not_none_nor_empty(params.ires, 'params.ires')
helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
if params.floating_bodies is not None:
for body in params.floating_bodies:
helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
helper.check_not_none_nor_empty(body.points, 'body.points')
helper.check_not_none_nor_empty(body.panels, 'body.panels')
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
try:
# Write the hdf5 inputs according to given parameters
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_calculations(params, hdf5_data)
# Launch preProcessor and Solver
# A prepared 'results' folder is necessary for the Nemoh software suite
os.mkdir(os.path.join(simulation_dir, 'results'))
simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
'B_SPLINE_ORDER': int(params.b_spline_order),
'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
}
_LOGGER.debug('Start preProcessor function.')
run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path)
_LOGGER.debug('End preProcessor function.')
_LOGGER.debug('Start solver function.')
output = run_thread(solver.solve, (custom_config,), None)
with open(simulation_log_path, 'a') as log_file:
log_file.write(output)
_LOGGER.debug('End solver function.')
with open(simulation_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
def postprocess(simulation_dir, params):
'''
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters does not
have a valid value
@raise ServiceError: if error occurs during launching the post-processing
'''
signature = __name__ + '.postprocess()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', PostprocessingParameters, False)
helper.check_type_value(params.irf, 'params.irf', list, False)
for irf_item in params.irf:
helper.check_not_none_nor_empty(irf_item, 'irf_item')
helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
for kochin_function_item in params.kochin_function:
helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
for elevation_item in params.free_surface_elevation:
helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
try:
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_postprocessing_section(params, hdf5_data)
# Launch postProcessor
postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : 328,
'GREEN_TABULATION_NUMZ' : 46,
'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
'USE_ODE_INFLUENCE_COEFFICIENTS': False,
'USE_HIGHER_ORDER' : False,
'NUM_PANEL_HIGHER_ORDER' : 1,
'B_SPLINE_ORDER': 1,
'USE_DIPOLES_IMPLEMENTATION': False,
'THIN_PANELS': [-1],
'COMPUTE_DRIFT_FORCES' : False,
'COMPUTE_YAW_MOMENT': False,
'REMOVE_IRREGULAR_FREQUENCIES' : False
}
_LOGGER.debug('Start postProcessor function.')
run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path)
_LOGGER.debug('End postProcessor in subprocess.')
with open(postprocessing_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
def visualize(simulation_dir):
'''
Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if error occurs during launching the ParaView
'''
signature = __name__ + '.visualize()'
helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
try:
# Filter files to be opened in ParaView
files = []
for f in os.listdir(os.path.join(simulation_dir, 'results')):
for ext in VISUALIZATION_FILE_EXTENSIONS:
if fnmatch.fnmatch(f, '*.' + ext):
files.append(os.path.join(simulation_dir, 'results', f))
# Check if there is a tec/vtk/stl file to visualize
if len(files) == 0:
raise ServiceError('There is no accepted file to visualize.')
_LOGGER.debug('List of files to load:')
_LOGGER.debug(str(files))
# Prepare script to run by ParaView
paraview_script = os.path.join(os.path.join(simulation_dir, 'results'), 'load_data.py')
prepare_paraview_script(paraview_script, files)
# Launch ParaView without waiting for the ParaView to exit
_LOGGER.debug('Start launching ParaView in subprocess.')
subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script + ''])
_LOGGER.debug('End launching ParaView in subprocess.')
helper.log_exit(_LOGGER, signature, None)
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
def prepare_paraview_script(script_path, files):
'''
Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
@param files: a list of data files path
@raise Exception: to its caller if any error occurs
'''
# Since this is an inner function, no entrance/exit information would be logged.
with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin:
with open(script_path, 'w') as fout:
for line in fin.readlines():
fout.write(line.rstrip().replace('<parameter_files>', str(files)) + '\n')
# From http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
class Silence:
"""
Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Example usage
with Silence(stderr='output.txt', mode='a'):
... # appending to existing file
... print >> sys.stderr, "Hello from stderr"
... print "Stdout redirected to os.devnull"
=== contents of 'output.txt' ===
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
"""
Initialize
Args:
self: The class itself
stdout: the descriptor or file name where to redirect stdout
stderr: the descriptor or file name where to redirect stderr
mode: the mode used when opening the redirect target file(s)
"""
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
def __enter__(self):
"""
Enter the context
Args:
self: The class itself
"""
import sys
self.sys = sys
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
# flush any pending output
for s in saved_streams: s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = map(os.fdopen, fds, ['w']*2, [0]*2)
else: null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
map(os.dup2, null_fds, fds)
def __exit__(self, *args):
"""
Exit the context
Args:
self: The class itself
args: other arguments
"""
sys = self.sys
# flush any pending output
for s in self.saved_streams: s.flush()
# restore original streams and file descriptors
map(os.dup2, self.saved_fds, self.fds)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams: s.close()
for fd in self.saved_fds: os.close(fd)
return False
def wrapper_io(func, fd, args, return_dict):
"""
Run a function while redirecting its output to a file descriptor
Args:
func: A python function to run
fd: a file descriptor
args: A tuple containing the arguments for the function
return_dict: Dictionary where to put the result of the function
"""
return_dict["output"] = ''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if fd:
with Silence(stdout=fd, stderr=os.devnull, mode='a'):
return_dict["output"] = func(*args)
else:
return_dict["output"] = func(*args)
def run_thread(func, args, fd):
"""
Run a python function in a thread and wait for it to complete.
Redirect its output to fd
Args:
func: A python function to run
args: A tuple containing the arguments for the function
fd: a file descriptor
"""
manager = Manager()
return_dict = manager.dict()
p = Process(target=wrapper_io, args=(func, fd, args, return_dict))
p.start()
p.join()
return return_dict["output"]
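# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how run_thread executes a function in a child process and
# hands the return value back through a Manager dict. `_example_task` is a
# hypothetical function added only for this illustration; passing a log file
# path instead of None as `fd` would additionally redirect its printed output.
def _example_task(x, y):
    print('adding %d and %d' % (x, y))
    return x + y
def _example_run_thread():
    # No output redirection here (fd=None); the child simply returns x + y.
    return run_thread(_example_task, (2, 3), None)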
def writeline_if_not_none(fout, data):
'''
Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line
'''
# Since this is an inner function, no entrance/exit information would be logged.
if data is not None:
fout.write(str(data) + '\n')
def prepare_dir(prefix):
'''
Prepare a working directory: a sub-directory of USER_DATA_DIRECTORY whose name is the given prefix
followed by the current timestamp and a uuid suffix.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if any error occurred when preparing the directory
'''
signature = __name__ + '.prepare_dir()'
helper.log_entrance(_LOGGER, signature, {'prefix': prefix})
# Checking parameters
helper.check_not_none_nor_empty(prefix, 'prefix')
try:
# Create a directory for this run (sub-directory name in format simulation_YYYYMMDDhhmmss)
# A uuid suffix is appended so that concurrent requests within the same second get distinct directories.
run_dir = os.path.join(USER_DATA_DIRECTORY, prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex)
os.makedirs(run_dir)
helper.log_exit(_LOGGER, signature, [run_dir])
return run_dir
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
|
cutouts
|
Custom version to extract stars cutouts
Parameters
----------
image: np.ndarray or path
stars: np.ndarray
stars positions with shape (n,2)
size: int
size of the cuts around stars (in pixels), by default 15
Returns
-------
idxs, stars: the ids of the extracted stars and the corresponding photutils cutouts (only the cutouts are returned for a single star)
|
from scipy.optimize import minimize
import warnings
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.nddata import NDData
from photutils.psf import extract_stars
from astropy.stats import gaussian_sigma_to_fwhm
from ..core import Block
import matplotlib.pyplot as plt
from collections import OrderedDict
from ..utils import fast_binning
def image_psf(image, stars, size=15, normalize=False, return_cutouts=False):
"""
Get global psf from image using photutils routines
Parameters
----------
image: np.ndarray or path
stars: np.ndarray
stars positions with shape (n,2)
size: int
size of the cuts around stars (in pixels)
normalize: bool, optional
whether to normalize the cutouts, default is False
Returns
-------
np.ndarray of shape (size, size)
"""
_, cuts = cutouts(image, stars, size=size)
cuts = cuts.data
if normalize:
cuts = [c/np.sum(c) for c in cuts]
if return_cutouts:
return np.median(cuts, axis=0), cuts
else:
return np.median(cuts, axis=0)
# MASKED: cutouts function (lines 43-77)
def good_cutouts(image, xy, r=30, upper=40000, lower=1000, trim=100):
idxs, _cuts = cutouts(image, xy, r)
cuts = OrderedDict(zip(idxs, _cuts))
peaks = [cutout.data.max() for cutout in cuts.values()]
for i, cutout in cuts.copy().items():
if i in cuts:
peak = cutout.data.max()
center = cutout.center
# removing saturated and faint stars
if peak > upper or peak < lower:
del cuts[i]
# removing stars on borders
elif np.any(center < [trim, trim]) or np.any(center > np.array(image.shape) - trim):
del cuts[i]
# removing close stars
closest = idxs[np.nonzero(np.linalg.norm(center - xy[idxs], axis=1) < r)[0]]
if len(closest) > 1:
for j in closest:
if j in cuts:
del cuts[j]
return cuts
def moments(data):
"""Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution by calculating its
moments """
height = data.max()
background = data.min()
data = data-np.min(data)
total = data.sum()
x, y = np.indices(data.shape)
x = (x * data).sum() / total
y = (y * data).sum() / total
col = data[:, int(y)]
width_x = np.sqrt(abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum())
row = data[int(x), :]
width_y = np.sqrt(abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum())
width_x /= gaussian_sigma_to_fwhm
width_y /= gaussian_sigma_to_fwhm
return height, x, y, width_x, width_y, 0.0, background
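# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of `moments` on a synthetic Gaussian spot. The returned
# tuple provides rough starting values (height, x, y, sigma_x, sigma_y, theta,
# background) for the least-squares fits performed by the PSF blocks below.
def _example_moments():
    y, x = np.mgrid[0:21, 0:21]
    spot = 50.0 * np.exp(-((x - 10.0) ** 2 + (y - 10.0) ** 2) / (2 * 2.0 ** 2)) + 5.0
    return moments(spot)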
class PSFModel(Block):
def __init__(self, cutout_size=21, save_cutouts=False, **kwargs):
super().__init__(**kwargs)
self.cutout_size = cutout_size
self.save_cutouts = save_cutouts
self.x, self.y = np.indices((self.cutout_size, self.cutout_size))
self.epsf = None
@property
def optimized_model(self):
return self.model(*self.optimized_params)
def build_epsf(self, image, stars):
return image_psf(image, stars.copy(), size=self.cutout_size, return_cutouts=self.save_cutouts)
def model(self):
raise NotImplementedError("")
def nll(self, p):
ll = np.sum(np.power((self.model(*p) - self.epsf), 2) * self.epsf)
return ll if np.isfinite(ll) else 1e25
def optimize(self):
raise NotImplementedError("")
def sigma_to_fwhm(self, *args):
return gaussian_sigma_to_fwhm
def run(self, image):
if self.save_cutouts:
self.epsf, image.cutouts = self.build_epsf(image.data, image.stars_coords)
else:
self.epsf = self.build_epsf(image.data, image.stars_coords)
image.fwhmx, image.fwhmy, image.theta = self.optimize()
image.fwhm = np.mean([image.fwhmx, image.fwhmy])
image.psf_sigma_x = image.fwhmx / self.sigma_to_fwhm()
image.psf_sigma_y = image.fwhmy / self.sigma_to_fwhm()
image.header["FWHM"] = image.fwhm
image.header["FWHMX"] = image.fwhmx
image.header["FWHMY"] = image.fwhmy
image.header["PSFANGLE"] = image.theta
image.header["FWHMALG"] = self.__class__.__name__
def show_residuals(self):
plt.imshow(self.epsf - self.optimized_model)
plt.colorbar()
ax = plt.gca()
plt.text(0.05, 0.05, "$\Delta f=$ {:.2f}%".format(100*np.sum(np.abs(self.epsf - self.optimized_model))/np.sum(self.epsf)),
fontsize=14, horizontalalignment='left', verticalalignment='bottom', transform=ax.transAxes, c="w")
def __call__(self, data):
self.epsf = data
return self.optimize()
class FWHM(PSFModel):
"""
Fast empirical FWHM (based on Arielle Bertrou-Cantou's idea)
"""
def __init__(self, cutout_size=51, **kwargs):
super().__init__(cutout_size=cutout_size, **kwargs)
Y, X = np.indices((self.cutout_size,self.cutout_size))
x = y = self.cutout_size/2
self.radii = (np.sqrt((X - x) ** 2 + (Y - y) ** 2)).flatten()
def optimize(self):
psf = self.epsf.copy()
psf -= np.min(psf)
pixels = psf.flatten()
binned_radii, binned_pixels, _ = fast_binning(self.radii, pixels, bins=1)
fwhm = 2*binned_radii[np.flatnonzero(binned_pixels > np.max(binned_pixels)/2)[-1]]
return fwhm, fwhm, 0
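# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of the FWHM block applied directly to a synthetic PSF via
# PSFModel.__call__, bypassing the pipeline Image object. It assumes the Block
# base class can be instantiated without extra required arguments, as the other
# blocks in this module suggest.
def _example_fwhm_block():
    y, x = np.mgrid[0:51, 0:51]
    psf = np.exp(-((x - 25.0) ** 2 + (y - 25.0) ** 2) / (2 * 3.0 ** 2))
    fwhm_x, fwhm_y, theta = FWHM()(psf)
    return fwhm_x  # roughly 2.355 * 3 pixels for a Gaussian profile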
class FastGaussian(PSFModel):
"""
Fit a symmetric 2D Gaussian model to an image effective PSF
"""
def __init__(self, cutout_size=21, **kwargs):
super().__init__(cutout_size=cutout_size, **kwargs)
def model(self, height, s, m):
dx = self.x - self.cutout_size/2
dy = self.y - self.cutout_size/2
psf = height * np.exp(-((dx/(2*s))**2 + (dy/(2*s))**2))
return psf + m
def optimize(self):
p0 = [np.max(self.epsf), 4, np.min(self.epsf)]
min_sigma = 0.5
bounds = [
(0, np.infty),
(min_sigma, np.infty),
(0, np.mean(self.epsf)),
]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
params = minimize(self.nll, p0, bounds=bounds).x
self.optimized_params = params
return params[1]*self.sigma_to_fwhm(), params[1]*self.sigma_to_fwhm(), 0
def citations(self):
return "scipy", "photutils"
class Gaussian2D(PSFModel):
"""
Fit an elliptical 2D Gaussian model to an image effective PSF
"""
def __init__(self, cutout_size=21, **kwargs):
super().__init__(cutout_size=cutout_size, **kwargs)
def model(self, height, xo, yo, sx, sy, theta, m):
dx = self.x - xo
dy = self.y - yo
a = (np.cos(theta)**2)/(2*sx**2) + (np.sin(theta)**2)/(2*sy**2)
b = -(np.sin(2*theta))/(4*sx**2) + (np.sin(2*theta))/(4*sy**2)
c = (np.sin(theta)**2)/(2*sx**2) + (np.cos(theta)**2)/(2*sy**2)
psf = height * np.exp(-(a * dx ** 2 + 2 * b * dx * dy + c * dy ** 2))
return psf + m
def optimize(self):
p0 = moments(self.epsf)
x0, y0 = p0[1], p0[2]
min_sigma = 0.5
bounds = [
(0, np.infty),
(x0 - 3, x0 + 3),
(y0 - 3, y0 + 3),
(min_sigma, np.infty),
(min_sigma, np.infty),
(0, 4),
(0, np.mean(self.epsf)),
]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
params = minimize(self.nll, p0, bounds=bounds).x
self.optimized_params = params
return params[3]*self.sigma_to_fwhm(), params[4]*self.sigma_to_fwhm(), params[-2]
def citations(self):
return "scipy", "photutils"
class Moffat2D(PSFModel):
"""
Fit an elliptical 2D Moffat model to an image effective PSF
"""
def __init__(self, cutout_size=21, **kwargs):
super().__init__(cutout_size=cutout_size, **kwargs)
def model(self, a, x0, y0, sx, sy, theta, b, beta):
# https://pixinsight.com/doc/tools/DynamicPSF/DynamicPSF.html
dx_ = self.x - x0
dy_ = self.y - y0
dx = dx_*np.cos(theta) + dy_*np.sin(theta)
dy = -dx_*np.sin(theta) + dy_*np.cos(theta)
return b + a / np.power(1 + (dx/sx)**2 + (dy/sy)**2, beta)
def sigma_to_fwhm(self):
return 2*np.sqrt(np.power(2, 1/self.optimized_params[-1]) - 1)
def optimize(self):
p0 = list(moments(self.epsf))
p0.append(1)
x0, y0 = p0[1], p0[2]
min_sigma = 0.5
bounds = [
(0, np.infty),
(x0 - 3, x0 + 3),
(y0 - 3, y0 + 3),
(min_sigma, np.infty),
(min_sigma, np.infty),
(0, 4),
(0, np.mean(self.epsf)),
(1, 8),
]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
params = minimize(self.nll, p0, bounds=bounds).x
self.optimized_params = params
sm = self.sigma_to_fwhm()
return params[3]*sm, params[4]*sm, params[-2]
def citations(self):
return "scipy", "photutils"
class KeepGoodStars(Block):
def __init__(self, n=-1, **kwargs):
super().__init__(**kwargs)
self.n = n
def run(self, image, n=-1):
good_stars = self(image.data, image.stars_coords)
image.stars_coords = good_stars
def __call__(self, data, stars):
i, _stars = cutouts(data, stars, size=21)
#good = np.array([shapiro(s.data).statistic for s in _stars]) > 0.33
good = np.array([np.std(s.data) for s in _stars]) > 1000
return stars[i][np.argwhere(good).squeeze()][0:self.n]
|
def cutouts(image, stars, size=15):
"""Custom version to extract stars cutouts
Parameters
----------
image: np.ndarray or path
stars: np.ndarray
stars positions with shape (n,2)
size: int
size of the cuts around stars (in pixels), by default 15
Returns
-------
idxs, stars: the ids of the extracted stars and the corresponding photutils cutouts (only the cutouts are returned for a single star)
"""
if isinstance(image, str):
image = fits.getdata(image)
warnings.simplefilter("ignore")
if np.shape(stars) > (1,2):
stars_tbl = Table(
[stars[:, 0], stars[:, 1], np.arange(len(stars))],
names=["x", "y", "id"])
stars = extract_stars(NDData(data=image), stars_tbl, size=size)
idxs = np.array([s.id_label for s in stars])
return idxs, stars
else:
stars_tbl = Table(
data=np.array([stars[0][0], stars[0][1]]),
names=["x", "y"])
stars = extract_stars(NDData(data=image), stars_tbl, size=size)
return stars
| 43 | 77 |
from scipy.optimize import minimize
import warnings
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.nddata import NDData
from photutils.psf import extract_stars
from astropy.stats import gaussian_sigma_to_fwhm
from ..core import Block
import matplotlib.pyplot as plt
from collections import OrderedDict
from ..utils import fast_binning
def image_psf(image, stars, size=15, normalize=False, return_cutouts=False):
"""
Get global psf from image using photutils routines
Parameters
----------
image: np.ndarray or path
stars: np.ndarray
stars positions with shape (n,2)
size: int
size of the cuts around stars (in pixels)
normalize: bool, optional
whether to normalize the cutouts, default is False
Returns
-------
np.ndarray of shape (size, size)
"""
_, cuts = cutouts(image, stars, size=size)
cuts = cuts.data
if normalize:
cuts = [c/np.sum(c) for c in cuts]
if return_cutouts:
return np.median(cuts, axis=0), cuts
else:
return np.median(cuts, axis=0)
def cutouts(image, stars, size=15):
"""Custom version to extract stars cutouts
Parameters
----------
image: np.ndarray or path
stars: np.ndarray
stars positions with shape (n,2)
size: int
size of the cuts around stars (in pixels), by default 15
Returns
-------
idxs, stars: the ids of the extracted stars and the corresponding photutils cutouts (only the cutouts are returned for a single star)
"""
if isinstance(image, str):
image = fits.getdata(image)
warnings.simplefilter("ignore")
if np.shape(stars) > (1,2):
stars_tbl = Table(
[stars[:, 0], stars[:, 1], np.arange(len(stars))],
names=["x", "y", "id"])
stars = extract_stars(NDData(data=image), stars_tbl, size=size)
idxs = np.array([s.id_label for s in stars])
return idxs, stars
else:
stars_tbl = Table(
data=np.array([stars[0][0], stars[0][1]]),
names=["x", "y"])
stars = extract_stars(NDData(data=image), stars_tbl, size=size)
return stars
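# --- Illustrative usage (not part of the original module) ---
# A minimal sketch calling `cutouts` and `image_psf` on a synthetic image with
# made-up star positions; it requires photutils to be installed.
def _example_image_psf():
    rng = np.random.default_rng(0)
    image = rng.normal(loc=100.0, scale=5.0, size=(200, 200))
    stars = np.array([[50.0, 60.0], [120.0, 80.0], [150.0, 150.0]])
    idxs, cuts = cutouts(image, stars, size=15)               # per-star cutouts
    epsf = image_psf(image, stars, size=15, normalize=True)   # median PSF
    return idxs, cuts, epsf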
def good_cutouts(image, xy, r=30, upper=40000, lower=1000, trim=100):
idxs, _cuts = cutouts(image, xy, r)
cuts = OrderedDict(zip(idxs, _cuts))
peaks = [cutout.data.max() for cutout in cuts.values()]
for i, cutout in cuts.copy().items():
if i in cuts:
peak = cutout.data.max()
center = cutout.center
# removing saturated and faint stars
if peak > upper or peak < lower:
del cuts[i]
# removing stars on borders
elif np.any(center < [trim, trim]) or np.any(center > np.array(image.shape) - trim):
del cuts[i]
# removing close stars
closest = idxs[np.nonzero(np.linalg.norm(center - xy[idxs], axis=1) < r)[0]]
if len(closest) > 1:
for j in closest:
if j in cuts:
del cuts[j]
return cuts
def moments(data):
"""Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution by calculating its
moments """
height = data.max()
background = data.min()
data = data-np.min(data)
total = data.sum()
x, y = np.indices(data.shape)
x = (x * data).sum() / total
y = (y * data).sum() / total
col = data[:, int(y)]
width_x = np.sqrt(abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum())
row = data[int(x), :]
width_y = np.sqrt(abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum())
width_x /= gaussian_sigma_to_fwhm
width_y /= gaussian_sigma_to_fwhm
return height, x, y, width_x, width_y, 0.0, background
class PSFModel(Block):
def __init__(self, cutout_size=21, save_cutouts=False, **kwargs):
super().__init__(**kwargs)
self.cutout_size = cutout_size
self.save_cutouts = save_cutouts
self.x, self.y = np.indices((self.cutout_size, self.cutout_size))
self.epsf = None
@property
def optimized_model(self):
return self.model(*self.optimized_params)
def build_epsf(self, image, stars):
return image_psf(image, stars.copy(), size=self.cutout_size, return_cutouts=self.save_cutouts)
def model(self):
raise NotImplementedError("")
def nll(self, p):
ll = np.sum(np.power((self.model(*p) - self.epsf), 2) * self.epsf)
return ll if np.isfinite(ll) else 1e25
def optimize(self):
raise NotImplementedError("")
def sigma_to_fwhm(self, *args):
return gaussian_sigma_to_fwhm
def run(self, image):
if self.save_cutouts:
self.epsf, image.cutouts = self.build_epsf(image.data, image.stars_coords)
else:
self.epsf = self.build_epsf(image.data, image.stars_coords)
image.fwhmx, image.fwhmy, image.theta = self.optimize()
image.fwhm = np.mean([image.fwhmx, image.fwhmy])
image.psf_sigma_x = image.fwhmx / self.sigma_to_fwhm()
image.psf_sigma_y = image.fwhmy / self.sigma_to_fwhm()
image.header["FWHM"] = image.fwhm
image.header["FWHMX"] = image.fwhmx
image.header["FWHMY"] = image.fwhmy
image.header["PSFANGLE"] = image.theta
image.header["FWHMALG"] = self.__class__.__name__
def show_residuals(self):
plt.imshow(self.epsf - self.optimized_model)
plt.colorbar()
ax = plt.gca()
plt.text(0.05, 0.05, "$\Delta f=$ {:.2f}%".format(100*np.sum(np.abs(self.epsf - self.optimized_model))/np.sum(self.epsf)),
fontsize=14, horizontalalignment='left', verticalalignment='bottom', transform=ax.transAxes, c="w")
def __call__(self, data):
self.epsf = data
return self.optimize()
class FWHM(PSFModel):
"""
Fast empirical FWHM (based on Arielle Bertrou-Cantou's idea)
"""
def __init__(self, cutout_size=51, **kwargs):
super().__init__(cutout_size=cutout_size, **kwargs)
Y, X = np.indices((self.cutout_size,self.cutout_size))
x = y = self.cutout_size/2
self.radii = (np.sqrt((X - x) ** 2 + (Y - y) ** 2)).flatten()
def optimize(self):
psf = self.epsf.copy()
psf -= np.min(psf)
pixels = psf.flatten()
binned_radii, binned_pixels, _ = fast_binning(self.radii, pixels, bins=1)
fwhm = 2*binned_radii[np.flatnonzero(binned_pixels > np.max(binned_pixels)/2)[-1]]
return fwhm, fwhm, 0
class FastGaussian(PSFModel):
"""
Fit a symmetric 2D Gaussian model to an image effective PSF
"""
def __init__(self, cutout_size=21, **kwargs):
super().__init__(cutout_size=cutout_size, **kwargs)
def model(self, height, s, m):
dx = self.x - self.cutout_size/2
dy = self.y - self.cutout_size/2
psf = height * np.exp(-((dx/(2*s))**2 + (dy/(2*s))**2))
return psf + m
def optimize(self):
p0 = [np.max(self.epsf), 4, np.min(self.epsf)]
min_sigma = 0.5
bounds = [
(0, np.infty),
(min_sigma, np.infty),
(0, np.mean(self.epsf)),
]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
params = minimize(self.nll, p0, bounds=bounds).x
self.optimized_params = params
return params[1]*self.sigma_to_fwhm(), params[1]*self.sigma_to_fwhm(), 0
def citations(self):
return "scipy", "photutils"
class Gaussian2D(PSFModel):
"""
Fit an elliptical 2D Gaussian model to an image effective PSF
"""
def __init__(self, cutout_size=21, **kwargs):
super().__init__(cutout_size=cutout_size, **kwargs)
def model(self, height, xo, yo, sx, sy, theta, m):
dx = self.x - xo
dy = self.y - yo
a = (np.cos(theta)**2)/(2*sx**2) + (np.sin(theta)**2)/(2*sy**2)
b = -(np.sin(2*theta))/(4*sx**2) + (np.sin(2*theta))/(4*sy**2)
c = (np.sin(theta)**2)/(2*sx**2) + (np.cos(theta)**2)/(2*sy**2)
psf = height * np.exp(-(a * dx ** 2 + 2 * b * dx * dy + c * dy ** 2))
return psf + m
def optimize(self):
p0 = moments(self.epsf)
x0, y0 = p0[1], p0[2]
min_sigma = 0.5
bounds = [
(0, np.infty),
(x0 - 3, x0 + 3),
(y0 - 3, y0 + 3),
(min_sigma, np.infty),
(min_sigma, np.infty),
(0, 4),
(0, np.mean(self.epsf)),
]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
params = minimize(self.nll, p0, bounds=bounds).x
self.optimized_params = params
return params[3]*self.sigma_to_fwhm(), params[4]*self.sigma_to_fwhm(), params[-2]
def citations(self):
return "scipy", "photutils"
class Moffat2D(PSFModel):
"""
Fit an elliptical 2D Moffat model to an image effective PSF
"""
def __init__(self, cutout_size=21, **kwargs):
super().__init__(cutout_size=cutout_size, **kwargs)
def model(self, a, x0, y0, sx, sy, theta, b, beta):
# https://pixinsight.com/doc/tools/DynamicPSF/DynamicPSF.html
dx_ = self.x - x0
dy_ = self.y - y0
dx = dx_*np.cos(theta) + dy_*np.sin(theta)
dy = -dx_*np.sin(theta) + dy_*np.cos(theta)
return b + a / np.power(1 + (dx/sx)**2 + (dy/sy)**2, beta)
def sigma_to_fwhm(self):
return 2*np.sqrt(np.power(2, 1/self.optimized_params[-1]) - 1)
def optimize(self):
p0 = list(moments(self.epsf))
p0.append(1)
x0, y0 = p0[1], p0[2]
min_sigma = 0.5
bounds = [
(0, np.infty),
(x0 - 3, x0 + 3),
(y0 - 3, y0 + 3),
(min_sigma, np.infty),
(min_sigma, np.infty),
(0, 4),
(0, np.mean(self.epsf)),
(1, 8),
]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
params = minimize(self.nll, p0, bounds=bounds).x
self.optimized_params = params
sm = self.sigma_to_fwhm()
return params[3]*sm, params[4]*sm, params[-2]
def citations(self):
return "scipy", "photutils"
class KeepGoodStars(Block):
def __init__(self, n=-1, **kwargs):
super().__init__(**kwargs)
self.n = n
def run(self, image, n=-1):
good_stars = self(image.data, image.stars_coords)
image.stars_coords = good_stars
def __call__(self, data, stars):
i, _stars = cutouts(data, stars, size=21)
#good = np.array([shapiro(s.data).statistic for s in _stars]) > 0.33
good = np.array([np.std(s.data) for s in _stars]) > 1000
return stars[i][np.argwhere(good).squeeze()][0:self.n]
|
_parse_github_path
|
Parse the absolute github path.
Args:
path: The full github path.
Returns:
repo: The repository identifier.
branch: Repository branch.
subpath: The inner path.
Raises:
ValueError: If the path is invalid
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Github pathlib-like util."""
import dataclasses
import functools
import os
import pathlib
import posixpath
from typing import Iterator, Mapping, MutableMapping, Optional, Set, Tuple
import requests
from tensorflow_datasets.core import utils
JsonValue = utils.JsonValue
_URI_PREFIX = 'github://'
def _get_token():
# Get the secret API token to avoid the 60 calls/hour limit
# To get the current quota or test the token:
# curl -H "Authorization: token ${GITHUB_TOKEN}" https://api.github.com/rate_limit # pylint: disable=line-too-long
return os.environ.get('GITHUB_TOKEN')
def get_content(url: str) -> bytes:
resp = requests.get(url)
if resp.status_code != 200:
raise FileNotFoundError(f'Request failed for {url}\n'
f' Error: {resp.status_code}\n'
f' Reason: {resp.content}')
return resp.content
class GithubApi:
"""Class to issue calls to the Github API."""
def __init__(self, token: Optional[str] = None):
self._token = token or _get_token()
def query(self, url: str) -> JsonValue:
"""Launches a Github API query and returns the result."""
headers = {}
if self._token:
headers['Authorization'] = f'token {self._token}'
resp = requests.get(url, headers=headers)
if resp.status_code != 200:
raise FileNotFoundError(
f'Request failed:\n'
f' Request: {url}\n'
f' Error: {resp.status_code}\n'
f' Reason: {resp.content}',)
return resp.json()
def query_tree(self, repo: str, branch: str) -> JsonValue:
"""Queries a repository tree.
See https://docs.github.com/en/rest/reference/git#trees
Args:
repo: the repository
branch: the branch for which to get the tree
Returns:
JSON dict with the tree.
"""
url = f'https://api.github.com/repos/{repo}/git/trees/{branch}?recursive=1'
return self.query(url)
def _correct_folder(folder: str) -> str:
"""Ensures the folder follows a standard.
Pathlib.parent in the root folder results in '.', whereas in other places
we should use '' for the root folder. This function makes sure the root
folder is always empty string.
Args:
folder: the folder to be corrected.
Returns:
The corrected folder.
"""
if folder == '.':
return ''
return folder
def _get_parent_folder(path: pathlib.PurePosixPath) -> str:
return _correct_folder(os.fspath(path.parent))
@dataclasses.dataclass(frozen=True)
class _GithubElement:
"""Representation of an element in a Github tree (a file or folder).
Attributes:
parent_folder: the folder in which this element resides.
name: the name of this element, e.g. the file name or the folder name.
is_folder: whether this element is a folder or not.
"""
parent_folder: str
name: str
is_folder: bool
@classmethod
def from_path(cls, path: pathlib.PurePosixPath,
is_folder: bool) -> '_GithubElement':
parent_folder = _get_parent_folder(path)
name = path.name
return cls(parent_folder=parent_folder, name=name, is_folder=is_folder)
@dataclasses.dataclass(frozen=True)
class _GithubTree:
"""A Github tree of a repository."""
files_per_folder: Mapping[str, Set[_GithubElement]]
def is_folder(self, path: str) -> bool:
return _correct_folder(path) in self.files_per_folder
def is_file(self, path: pathlib.PurePosixPath) -> bool:
parent_folder = _get_parent_folder(path)
files = self.files_per_folder.get(parent_folder)
if not files:
return False
file = _GithubElement(
parent_folder=parent_folder, name=path.name, is_folder=False)
return file in files
@classmethod
def from_json(cls, value) -> '_GithubTree':
"""Parses a GithubTree from the given JSON."""
if not isinstance(value, dict) or 'tree' not in value:
raise ValueError(f'Github API response not supported: {value}')
files_per_folder: MutableMapping[str, Set[_GithubElement]] = {}
for element in value['tree']:
github_element = _GithubElement.from_path(
path=pathlib.PurePosixPath(element['path']),
is_folder=(element['type'] == 'tree'))
if element['type'] in {'blob', 'tree'}:
files_per_folder.setdefault(github_element.parent_folder, set())
files_per_folder[github_element.parent_folder].add(github_element)
return _GithubTree(files_per_folder=files_per_folder)
@staticmethod
@functools.lru_cache(maxsize=None)
def from_cache(repo: str, branch: str) -> '_GithubTree':
"""Factory which caches the entire Github tree."""
tree_json = GithubApi().query_tree(repo, branch)
# If the tree is truncated, then we'll need a more sophisticated method to
# retrieve the whole tree. Since this is currently not supported, it raises
# an exception.
assert not tree_json.get('truncated', False)
return _GithubTree.from_json(tree_json)
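# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of _GithubTree.from_json on a hand-written payload shaped
# like the git/trees API response, checking folder and file lookups.
def _example_github_tree():
    payload = {
        'tree': [
            {'path': 'docs', 'type': 'tree'},
            {'path': 'docs/README.md', 'type': 'blob'},
        ]
    }
    tree = _GithubTree.from_json(payload)
    assert tree.is_folder('docs')
    assert tree.is_file(pathlib.PurePosixPath('docs/README.md'))
    return tree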
@dataclasses.dataclass(frozen=True, eq=True)
class _PathMetadata:
"""Github metadata of a file or directory."""
path: str
repo: str # e.g. `tensorflow/datasets`
branch: str # e.g. `master`
subpath: str # e.g. 'core/__init__.py'
@classmethod
def from_path(cls, path: str) -> '_PathMetadata':
repo, branch, subpath = _parse_github_path(path)
return cls(path=path, repo=repo, branch=branch, subpath=subpath)
@utils.register_pathlike_cls(_URI_PREFIX)
class GithubPath(pathlib.PurePosixPath):
"""`pathlib.Path` like object for manipulating Github paths.
Example:
```
path = GithubPath.from_repo('tensorflow/datasets')
path = path / 'docs' / 'catalog'
assert path.is_dir()
datasets = [
p.name for p in path.iterdir() if p.match('*.md')
]
path = GithubPath('github://tensorflow/datasets/tree/master/docs/README.md')
assert path.subpath == 'docs/README.md'
assert path.repo == 'tensorflow/datasets'
assert path.branch == 'master'
```
"""
def __new__(cls, *parts: utils.PathLike) -> 'GithubPath':
full_path = '/'.join(os.fspath(p) for p in parts)
_parse_github_path(full_path)
return super().__new__(cls, full_path.replace(_URI_PREFIX, '/github/', 1))
@utils.memoized_property
def _path_str(self) -> str:
return posixpath.join(_URI_PREFIX, *self.parts[2:])
def __fspath__(self) -> str:
return self._path_str
def __str__(self) -> str: # pylint: disable=invalid-str-returned
return self._path_str
@classmethod
def from_repo(cls, repo: str, branch: str = 'master') -> 'GithubPath':
"""Factory to creates a GithubPath from a repo name.
Args:
repo: Repo name (e.g. `tensorflow/datasets`)
branch: Branch name (e.g. `master`, 'v1.2.0', '0d240e8b85c'). Default to
master.
Returns:
github_path: The repository root dir at head
"""
return cls(f'github://{repo}/tree/{branch}')
@utils.memoized_property
def _metadata(self) -> _PathMetadata:
return _PathMetadata.from_path(os.fspath(self))
@property
def subpath(self) -> str:
"""The inner path (e.g. `core/__init__.py`)."""
return self._metadata.subpath
@property
def repo(self) -> str:
"""The repository identifier (e.g. `tensorflow/datasets`)."""
return self._metadata.repo
@property
def branch(self) -> str:
"""The branch (e.g. `master`, `v2`, `43bbad116df`,...)."""
return self._metadata.branch
@property
def github_tree(self) -> _GithubTree:
return _GithubTree.from_cache(self.repo, self.branch)
def as_raw_url(self) -> str:
"""Returns the raw content url (https://raw.githubusercontent.com)."""
return ('https://raw.githubusercontent.com/'
f'{self.repo}/{self.branch}/{self.subpath}')
def as_human_friendly_url(self) -> str:
"""Returns the human friendly url."""
return f'https://github.com/{self.repo}/blob/{self.branch}/{self.subpath}'
def iterdir(self) -> Iterator['GithubPath']:
"""Yields the sub-paths."""
if not self.is_dir():
raise NotADirectoryError(f'{self.subpath} is not a directory.')
for filename in self.github_tree.files_per_folder[self.subpath]:
yield self / filename.name
def is_dir(self) -> bool:
"""Returns True if the path is a directory or submodule."""
return self.github_tree.is_folder(self.subpath)
def is_file(self) -> bool:
"""Returns True if the path is a file."""
return self.github_tree.is_file(pathlib.PurePosixPath(self.subpath))
def exists(self) -> bool:
"""Returns True if the path exists."""
return self.is_dir() or self.is_file()
def read_bytes(self) -> bytes:
"""Returns the file content as bytes."""
# As the content is fetched during the Github API calls, we could cache it
# and return it directly here, rather than using an additional query.
# However this might have significant memory impact if many `GithubPath`
# are used, so would require some additional cleanup (weakref ?).
# Using raw_url doesn't count towards the API call quota and should work with
# arbitrarily sized files.
url = self.as_raw_url()
return get_content(url)
def read_text(self, encoding: Optional[str] = None) -> str:
"""Returns the file content as string."""
return self.read_bytes().decode(encoding=encoding or 'utf-8')
def copy(
self,
dst: utils.PathLike,
overwrite: bool = False,
) -> utils.ReadWritePath:
"""Copy the current file to the given destination.
Args:
dst: Target file. It can be any PathLike compatible path (e.g. `gs://...`)
overwrite: Whether the file should be overwritten or not
Returns:
The newly created file.
Raises:
FileExistsError: If `overwrite` is false and destination exists.
"""
dst = utils.as_path(dst)
if not overwrite and dst.exists():
raise FileExistsError(f'Cannot copy {self}. Destination {dst} exists.')
# Otherwise, copy src to dst
dst.write_bytes(self.read_bytes())
return dst
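# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of GithubPath end to end: build a path, check it exists,
# copy it to a local file and read it. It needs network access, and the local
# destination '/tmp/tfds_README.md' is made up for the illustration.
def _example_github_path():
    readme = GithubPath.from_repo('tensorflow/datasets') / 'README.md'
    assert readme.is_file()
    readme.copy('/tmp/tfds_README.md', overwrite=True)
    return readme.read_text()[:80]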
# MASKED: _parse_github_path function (lines 328-358)
|
def _parse_github_path(path: str) -> Tuple[str, str, str]:
"""Parse the absolute github path.
Args:
path: The full github path.
Returns:
repo: The repository identifier.
branch: Repository branch.
subpath: The inner path.
Raises:
ValueError: If the path is invalid
"""
err_msg = (f'Invalid github path: {path}. Expected format: '
'`github://<owner>/<name>/tree/<branch>[/<path>]`.')
if not path.startswith(_URI_PREFIX):
raise ValueError(err_msg)
if path.endswith('/'):
raise ValueError(err_msg + ' Trailing `/` not supported.')
parts = path[len(_URI_PREFIX):].split('/')
if len(parts) < 4:
raise ValueError(err_msg)
# 'tensorflow', 'datasets', 'tree', 'master', ...
owner, repo, tree, branch, *subpath = parts
if tree != 'tree':
raise ValueError(err_msg + '. `/blob/` isn\'t accepted. Only `/tree/`.')
return f'{owner}/{repo}', branch, '/'.join(subpath)
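# --- Illustrative usage (not part of the original function) ---
# A minimal sketch of what _parse_github_path returns for a well-formed URI.
def _example_parse_github_path():
    repo, branch, subpath = _parse_github_path(
        'github://tensorflow/datasets/tree/master/docs/README.md')
    assert repo == 'tensorflow/datasets'
    assert branch == 'master'
    assert subpath == 'docs/README.md'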
| 328 | 358 |
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Github pathlib-like util."""
import dataclasses
import functools
import os
import pathlib
import posixpath
from typing import Iterator, Mapping, MutableMapping, Optional, Set, Tuple
import requests
from tensorflow_datasets.core import utils
JsonValue = utils.JsonValue
_URI_PREFIX = 'github://'
def _get_token():
# Get the secret API token to avoid the 60 calls/hour limit
# To get the current quota or test the token:
# curl -H "Authorization: token ${GITHUB_TOKEN}" https://api.github.com/rate_limit # pylint: disable=line-too-long
return os.environ.get('GITHUB_TOKEN')
def get_content(url: str) -> bytes:
resp = requests.get(url)
if resp.status_code != 200:
raise FileNotFoundError(f'Request failed for {url}\n'
f' Error: {resp.status_code}\n'
f' Reason: {resp.content}')
return resp.content
class GithubApi:
"""Class to issue calls to the Github API."""
def __init__(self, token: Optional[str] = None):
self._token = token or _get_token()
def query(self, url: str) -> JsonValue:
"""Launches a Github API query and returns the result."""
headers = {}
if self._token:
headers['Authorization'] = f'token {self._token}'
resp = requests.get(url, headers=headers)
if resp.status_code != 200:
raise FileNotFoundError(
f'Request failed:\n'
f' Request: {url}\n'
f' Error: {resp.status_code}\n'
f' Reason: {resp.content}',)
return resp.json()
def query_tree(self, repo: str, branch: str) -> JsonValue:
"""Queries a repository tree.
See https://docs.github.com/en/rest/reference/git#trees
Args:
repo: the repository
branch: the branch for which to get the tree
Returns:
JSON dict with the tree.
"""
url = f'https://api.github.com/repos/{repo}/git/trees/{branch}?recursive=1'
return self.query(url)
def _correct_folder(folder: str) -> str:
"""Ensures the folder follows a standard.
Pathlib.parent in the root folder results in '.', whereas in other places
we should use '' for the root folder. This function makes sure the root
folder is always empty string.
Args:
folder: the folder to be corrected.
Returns:
The corrected folder.
"""
if folder == '.':
return ''
return folder
def _get_parent_folder(path: pathlib.PurePosixPath) -> str:
return _correct_folder(os.fspath(path.parent))
@dataclasses.dataclass(frozen=True)
class _GithubElement:
"""Representation of an element in a Github tree (a file or folder).
Attributes:
parent_folder: the folder in which this element resides.
name: the name of this element, e.g. the file name or the folder name.
is_folder: whether this element is a folder or not.
"""
parent_folder: str
name: str
is_folder: bool
@classmethod
def from_path(cls, path: pathlib.PurePosixPath,
is_folder: bool) -> '_GithubElement':
parent_folder = _get_parent_folder(path)
name = path.name
return cls(parent_folder=parent_folder, name=name, is_folder=is_folder)
@dataclasses.dataclass(frozen=True)
class _GithubTree:
"""A Github tree of a repository."""
files_per_folder: Mapping[str, Set[_GithubElement]]
def is_folder(self, path: str) -> bool:
return _correct_folder(path) in self.files_per_folder
def is_file(self, path: pathlib.PurePosixPath) -> bool:
parent_folder = _get_parent_folder(path)
files = self.files_per_folder.get(parent_folder)
if not files:
return False
file = _GithubElement(
parent_folder=parent_folder, name=path.name, is_folder=False)
return file in files
@classmethod
def from_json(cls, value) -> '_GithubTree':
"""Parses a GithubTree from the given JSON."""
if not isinstance(value, dict) or 'tree' not in value:
raise ValueError(f'Github API response not supported: {value}')
files_per_folder: MutableMapping[str, Set[_GithubElement]] = {}
for element in value['tree']:
github_element = _GithubElement.from_path(
path=pathlib.PurePosixPath(element['path']),
is_folder=(element['type'] == 'tree'))
if element['type'] in {'blob', 'tree'}:
files_per_folder.setdefault(github_element.parent_folder, set())
files_per_folder[github_element.parent_folder].add(github_element)
return _GithubTree(files_per_folder=files_per_folder)
@staticmethod
@functools.lru_cache(maxsize=None)
def from_cache(repo: str, branch: str) -> '_GithubTree':
"""Factory which caches the entire Github tree."""
tree_json = GithubApi().query_tree(repo, branch)
# If the tree is truncated, then we'll need a more sophisticated method to
# retrieve the whole tree. Since this is currently not supported, it raises
# an exception.
assert not tree_json.get('truncated', False)
return _GithubTree.from_json(tree_json)
@dataclasses.dataclass(frozen=True, eq=True)
class _PathMetadata:
"""Github metadata of a file or directory."""
path: str
repo: str # e.g. `tensorflow/datasets`
branch: str # e.g. `master`
subpath: str # e.g. 'core/__init__.py'
@classmethod
def from_path(cls, path: str) -> '_PathMetadata':
repo, branch, subpath = _parse_github_path(path)
return cls(path=path, repo=repo, branch=branch, subpath=subpath)
@utils.register_pathlike_cls(_URI_PREFIX)
class GithubPath(pathlib.PurePosixPath):
"""`pathlib.Path` like object for manipulating Github paths.
Example:
```
path = GithubPath.from_repo('tensorflow/datasets')
path = path / 'docs' / 'catalog'
assert path.is_dir()
datasets = [
p.name for p in path.iterdir() if p.match('*.md')
]
path = GithubPath('github://tensorflow/datasets/tree/master/docs/README.md')
assert path.subpath == 'docs/README.md'
assert path.repo == 'tensorflow/datasets'
assert path.branch == 'master'
```
"""
def __new__(cls, *parts: utils.PathLike) -> 'GithubPath':
full_path = '/'.join(os.fspath(p) for p in parts)
_parse_github_path(full_path)
return super().__new__(cls, full_path.replace(_URI_PREFIX, '/github/', 1))
@utils.memoized_property
def _path_str(self) -> str:
return posixpath.join(_URI_PREFIX, *self.parts[2:])
def __fspath__(self) -> str:
return self._path_str
def __str__(self) -> str: # pylint: disable=invalid-str-returned
return self._path_str
@classmethod
def from_repo(cls, repo: str, branch: str = 'master') -> 'GithubPath':
"""Factory to creates a GithubPath from a repo name.
Args:
repo: Repo name (e.g. `tensorflow/datasets`)
branch: Branch name (e.g. `master`, 'v1.2.0', '0d240e8b85c'). Default to
master.
Returns:
github_path: The repository root dir at head
"""
return cls(f'github://{repo}/tree/{branch}')
@utils.memoized_property
def _metadata(self) -> _PathMetadata:
return _PathMetadata.from_path(os.fspath(self))
@property
def subpath(self) -> str:
"""The inner path (e.g. `core/__init__.py`)."""
return self._metadata.subpath
@property
def repo(self) -> str:
"""The repository identifier (e.g. `tensorflow/datasets`)."""
return self._metadata.repo
@property
def branch(self) -> str:
"""The branch (e.g. `master`, `v2`, `43bbad116df`,...)."""
return self._metadata.branch
@property
def github_tree(self) -> _GithubTree:
return _GithubTree.from_cache(self.repo, self.branch)
def as_raw_url(self) -> str:
"""Returns the raw content url (https://raw.githubusercontent.com)."""
return ('https://raw.githubusercontent.com/'
f'{self.repo}/{self.branch}/{self.subpath}')
def as_human_friendly_url(self) -> str:
"""Returns the human friendly url."""
return f'https://github.com/{self.repo}/blob/{self.branch}/{self.subpath}'
def iterdir(self) -> Iterator['GithubPath']:
"""Yields the sub-paths."""
if not self.is_dir():
raise NotADirectoryError(f'{self.subpath} is not a directory.')
for filename in self.github_tree.files_per_folder[self.subpath]:
yield self / filename.name
def is_dir(self) -> bool:
"""Returns True if the path is a directory or submodule."""
return self.github_tree.is_folder(self.subpath)
def is_file(self) -> bool:
"""Returns True if the path is a file."""
return self.github_tree.is_file(pathlib.PurePosixPath(self.subpath))
def exists(self) -> bool:
"""Returns True if the path exists."""
return self.is_dir() or self.is_file()
def read_bytes(self) -> bytes:
"""Returns the file content as bytes."""
# As the content is fetched during the Github API calls, we could cache it
# and return it directly here, rather than using an additional query.
# However this might have significant memory impact if many `GithubPath`
# are used, so would require some additional cleanup (weakref ?).
# Using raw_url doesn't count towards the API call quota and should work with
# arbitrarily sized files.
url = self.as_raw_url()
return get_content(url)
def read_text(self, encoding: Optional[str] = None) -> str:
"""Returns the file content as string."""
return self.read_bytes().decode(encoding=encoding or 'utf-8')
def copy(
self,
dst: utils.PathLike,
overwrite: bool = False,
) -> utils.ReadWritePath:
"""Copy the current file to the given destination.
Args:
dst: Target file. It can be any PathLike compatible path (e.g. `gs://...`)
overwrite: Whether the file should be overwritten or not
Returns:
The newly created file.
Raises:
FileExistsError: If `overwrite` is false and destination exists.
"""
dst = utils.as_path(dst)
if not overwrite and dst.exists():
raise FileExistsError(f'Cannot copy {self}. Destination {dst} exists.')
# Otherwise, copy src to dst
dst.write_bytes(self.read_bytes())
return dst
def _parse_github_path(path: str) -> Tuple[str, str, str]:
"""Parse the absolute github path.
Args:
path: The full github path.
Returns:
repo: The repository identifier.
branch: Repository branch.
subpath: The inner path.
Raises:
ValueError: If the path is invalid
"""
err_msg = (f'Invalid github path: {path}. Expected format: '
'`github://<owner>/<name>/tree/<branch>[/<path>]`.')
if not path.startswith(_URI_PREFIX):
raise ValueError(err_msg)
if path.endswith('/'):
raise ValueError(err_msg + ' Trailing `/` not supported.')
parts = path[len(_URI_PREFIX):].split('/')
if len(parts) < 4:
raise ValueError(err_msg)
# 'tensorflow', 'datasets', 'tree', 'master', ...
owner, repo, tree, branch, *subpath = parts
if tree != 'tree':
raise ValueError(err_msg + '. `/blob/` isn\'t accepted. Only `/tree/`.')
return f'{owner}/{repo}', branch, '/'.join(subpath)
|
copy
|
Copy the current file to the given destination.
Args:
dst: Target file. It can be any PathLike compatible path (e.g. `gs://...`)
overwrite: Whether the file should be overwritten or not
Returns:
The newly created file.
Raises:
FileExistsError: If `overwrite` is false and destination exists.
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Github pathlib-like util."""
import dataclasses
import functools
import os
import pathlib
import posixpath
from typing import Iterator, Mapping, MutableMapping, Optional, Set, Tuple
import requests
from tensorflow_datasets.core import utils
JsonValue = utils.JsonValue
_URI_PREFIX = 'github://'
def _get_token():
# Get the secret API token to avoid the 60 calls/hour limit
# To get the current quota or test the token:
# curl -H "Authorization: token ${GITHUB_TOKEN}" https://api.github.com/rate_limit # pylint: disable=line-too-long
return os.environ.get('GITHUB_TOKEN')
def get_content(url: str) -> bytes:
resp = requests.get(url)
if resp.status_code != 200:
raise FileNotFoundError(f'Request failed for {url}\n'
f' Error: {resp.status_code}\n'
f' Reason: {resp.content}')
return resp.content
class GithubApi:
"""Class to issue calls to the Github API."""
def __init__(self, token: Optional[str] = None):
self._token = token or _get_token()
def query(self, url: str) -> JsonValue:
"""Launches a Github API query and returns the result."""
headers = {}
if self._token:
headers['Authorization'] = f'token {self._token}'
resp = requests.get(url, headers=headers)
if resp.status_code != 200:
raise FileNotFoundError(
f'Request failed:\n'
f' Request: {url}\n'
f' Error: {resp.status_code}\n'
f' Reason: {resp.content}',)
return resp.json()
def query_tree(self, repo: str, branch: str) -> JsonValue:
"""Queries a repository tree.
See https://docs.github.com/en/rest/reference/git#trees
Args:
repo: the repository
branch: the branch for which to get the tree
Returns:
JSON dict with the tree.
"""
url = f'https://api.github.com/repos/{repo}/git/trees/{branch}?recursive=1'
return self.query(url)
def _correct_folder(folder: str) -> str:
"""Ensures the folder follows a standard.
Pathlib.parent in the root folder results in '.', whereas in other places
we should use '' for the root folder. This function makes sure the root
folder is always empty string.
Args:
folder: the folder to be corrected.
Returns:
The corrected folder.
"""
if folder == '.':
return ''
return folder
def _get_parent_folder(path: pathlib.PurePosixPath) -> str:
return _correct_folder(os.fspath(path.parent))
@dataclasses.dataclass(frozen=True)
class _GithubElement:
"""Representation of an element in a Github tree (a file or folder).
Attributes:
parent_folder: the folder in which this element resides.
name: the name of this element, e.g. the file name or the folder name.
is_folder: whether this element is a folder or not.
"""
parent_folder: str
name: str
is_folder: bool
@classmethod
def from_path(cls, path: pathlib.PurePosixPath,
is_folder: bool) -> '_GithubElement':
parent_folder = _get_parent_folder(path)
name = path.name
return cls(parent_folder=parent_folder, name=name, is_folder=is_folder)
@dataclasses.dataclass(frozen=True)
class _GithubTree:
"""A Github tree of a repository."""
files_per_folder: Mapping[str, Set[_GithubElement]]
def is_folder(self, path: str) -> bool:
return _correct_folder(path) in self.files_per_folder
def is_file(self, path: pathlib.PurePosixPath) -> bool:
parent_folder = _get_parent_folder(path)
files = self.files_per_folder.get(parent_folder)
if not files:
return False
file = _GithubElement(
parent_folder=parent_folder, name=path.name, is_folder=False)
return file in files
@classmethod
def from_json(cls, value) -> '_GithubTree':
"""Parses a GithubTree from the given JSON."""
if not isinstance(value, dict) or 'tree' not in value:
raise ValueError(f'Github API response not supported: {value}')
files_per_folder: MutableMapping[str, Set[_GithubElement]] = {}
for element in value['tree']:
github_element = _GithubElement.from_path(
path=pathlib.PurePosixPath(element['path']),
is_folder=(element['type'] == 'tree'))
if element['type'] in {'blob', 'tree'}:
files_per_folder.setdefault(github_element.parent_folder, set())
files_per_folder[github_element.parent_folder].add(github_element)
return _GithubTree(files_per_folder=files_per_folder)
@staticmethod
@functools.lru_cache(maxsize=None)
def from_cache(repo: str, branch: str) -> '_GithubTree':
"""Factory which caches the entire Github tree."""
tree_json = GithubApi().query_tree(repo, branch)
# If the tree is truncated, then we'll need a more sophisticated method to
# retrieve the whole tree. Since this is currently not supported, it raises
# an exception.
assert not tree_json.get('truncated', False)
return _GithubTree.from_json(tree_json)
@dataclasses.dataclass(frozen=True, eq=True)
class _PathMetadata:
"""Github metadata of a file or directory."""
path: str
repo: str # e.g. `tensorflow/datasets`
branch: str # e.g. `master`
subpath: str # e.g. 'core/__init__.py'
@classmethod
def from_path(cls, path: str) -> '_PathMetadata':
repo, branch, subpath = _parse_github_path(path)
return cls(path=path, repo=repo, branch=branch, subpath=subpath)
@utils.register_pathlike_cls(_URI_PREFIX)
class GithubPath(pathlib.PurePosixPath):
"""`pathlib.Path` like object for manipulating Github paths.
Example:
```
path = GithubPath.from_repo('tensorflow/datasets')
path = path / 'docs' / 'catalog'
assert path.is_dir()
datasets = [
p.name for p in path.iterdir() if p.match('*.md')
]
path = GithubPath('github://tensorflow/datasets/tree/master/docs/README.md')
assert path.subpath == 'docs/README.md'
assert path.repo == 'tensorflow/datasets'
assert path.branch == 'master'
```
"""
def __new__(cls, *parts: utils.PathLike) -> 'GithubPath':
full_path = '/'.join(os.fspath(p) for p in parts)
_parse_github_path(full_path)
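# The parse above validates the URI. 'github://' is then swapped for a plain
# '/github/' root so PurePosixPath keeps the path intact (it would otherwise
# collapse the '//'); _path_str below rebuilds the 'github://' form.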
return super().__new__(cls, full_path.replace(_URI_PREFIX, '/github/', 1))
@utils.memoized_property
def _path_str(self) -> str:
return posixpath.join(_URI_PREFIX, *self.parts[2:])
def __fspath__(self) -> str:
return self._path_str
def __str__(self) -> str: # pylint: disable=invalid-str-returned
return self._path_str
@classmethod
def from_repo(cls, repo: str, branch: str = 'master') -> 'GithubPath':
"""Factory to creates a GithubPath from a repo name.
Args:
repo: Repo name (e.g. `tensorflow/datasets`)
branch: Branch name (e.g. `master`, `v1.2.0`, `0d240e8b85c`). Defaults to
`master`.
Returns:
github_path: The root directory of the repository at the given branch.
"""
return cls(f'github://{repo}/tree/{branch}')
@utils.memoized_property
def _metadata(self) -> _PathMetadata:
return _PathMetadata.from_path(os.fspath(self))
@property
def subpath(self) -> str:
"""The inner path (e.g. `core/__init__.py`)."""
return self._metadata.subpath
@property
def repo(self) -> str:
"""The repository identifier (e.g. `tensorflow/datasets`)."""
return self._metadata.repo
@property
def branch(self) -> str:
"""The branch (e.g. `master`, `v2`, `43bbad116df`,...)."""
return self._metadata.branch
@property
def github_tree(self) -> _GithubTree:
return _GithubTree.from_cache(self.repo, self.branch)
def as_raw_url(self) -> str:
"""Returns the raw content url (https://raw.githubusercontent.com)."""
return ('https://raw.githubusercontent.com/'
f'{self.repo}/{self.branch}/{self.subpath}')
def as_human_friendly_url(self) -> str:
"""Returns the human friendly url."""
return f'https://github.com/{self.repo}/blob/{self.branch}/{self.subpath}'
def iterdir(self) -> Iterator['GithubPath']:
"""Yields the sub-paths."""
if not self.is_dir():
raise NotADirectoryError(f'{self.subpath} is not a directory.')
for filename in self.github_tree.files_per_folder[self.subpath]:
yield self / filename.name
def is_dir(self) -> bool:
"""Returns True if the path is a directory or submodule."""
return self.github_tree.is_folder(self.subpath)
def is_file(self) -> bool:
"""Returns True if the path is a file."""
return self.github_tree.is_file(pathlib.PurePosixPath(self.subpath))
def exists(self) -> bool:
"""Returns True if the path exists."""
return self.is_dir() or self.is_file()
def read_bytes(self) -> bytes:
"""Returns the file content as bytes."""
# As the content is fetched during the Github API calls, we could cache it
# and return it directly here, rather than using an additional query.
# However this might have significant memory impact if many `GithubPath`
# are used, so would require some additional cleanup (weakref ?).
# Using raw_url doesn't count against the API call quota and should work with
# arbitrarily sized files.
url = self.as_raw_url()
return get_content(url)
def read_text(self, encoding: Optional[str] = None) -> str:
"""Returns the file content as string."""
return self.read_bytes().decode(encoding=encoding or 'utf-8')
# MASKED: copy function (lines 303-325)
def _parse_github_path(path: str) -> Tuple[str, str, str]:
"""Parse the absolute github path.
Args:
path: The full github path.
Returns:
repo: The repository identifier.
branch: Repository branch.
subpath: The inner path.
Raises:
ValueError: If the path is invalid
"""
err_msg = (f'Invalid github path: {path}. Expected format: '
'`github://<owner>/<name>/tree/<branch>[/<path>]`.')
if not path.startswith(_URI_PREFIX):
raise ValueError(err_msg)
if path.endswith('/'):
raise ValueError(err_msg + ' Trailing `/` not supported.')
parts = path[len(_URI_PREFIX):].split('/')
if len(parts) < 4:
raise ValueError(err_msg)
# 'tensorflow', 'datasets', 'tree', 'master', ...
owner, repo, tree, branch, *subpath = parts
if tree != 'tree':
raise ValueError(err_msg + ' `/blob/` isn\'t accepted. Only `/tree/`.')
return f'{owner}/{repo}', branch, '/'.join(subpath)
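# Illustrative example (values taken from the GithubPath docstring above):
#   _parse_github_path('github://tensorflow/datasets/tree/master/docs/README.md')
#   -> ('tensorflow/datasets', 'master', 'docs/README.md')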
|
def copy(
self,
dst: utils.PathLike,
overwrite: bool = False,
) -> utils.ReadWritePath:
"""Copy the current file to the given destination.
Args:
dst: Target file. It can be any PathLike compatible path (e.g. `gs://...`)
overwrite: Whether the file should be overwritten or not
Returns:
The newly created file.
Raises:
FileExistsError: If `overwrite` is false and destination exists.
"""
dst = utils.as_path(dst)
if not overwrite and dst.exists():
raise FileExistsError(f'Cannot copy {self}. Destination {dst} exists.')
# Otherwise, copy src to dst
dst.write_bytes(self.read_bytes())
return dst
| 303 | 325 |
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Github pathlib-like util."""
import dataclasses
import functools
import os
import pathlib
import posixpath
from typing import Iterator, Mapping, MutableMapping, Optional, Set, Tuple
import requests
from tensorflow_datasets.core import utils
JsonValue = utils.JsonValue
_URI_PREFIX = 'github://'
def _get_token():
# Get the secret API token to avoid the 60 calls/hour limit
# To get the current quota or test the token:
# curl -H "Authorization: token ${GITHUB_TOKEN}" https://api.github.com/rate_limit # pylint: disable=line-too-long
return os.environ.get('GITHUB_TOKEN')
def get_content(url: str) -> bytes:
resp = requests.get(url)
if resp.status_code != 200:
raise FileNotFoundError(f'Request failed for {url}\n'
f' Error: {resp.status_code}\n'
f' Reason: {resp.content}')
return resp.content
class GithubApi:
"""Class to issue calls to the Github API."""
def __init__(self, token: Optional[str] = None):
self._token = token or _get_token()
def query(self, url: str) -> JsonValue:
"""Launches a Github API query and returns the result."""
headers = {}
if self._token:
headers['Authorization'] = f'token {self._token}'
resp = requests.get(url, headers=headers)
if resp.status_code != 200:
raise FileNotFoundError(
f'Request failed:\n'
f' Request: {url}\n'
f' Error: {resp.status_code}\n'
f' Reason: {resp.content}',)
return resp.json()
def query_tree(self, repo: str, branch: str) -> JsonValue:
"""Queries a repository tree.
See https://docs.github.com/en/rest/reference/git#trees
Args:
repo: the repository
branch: the branch for which to get the tree
Returns:
JSON dict with the tree.
"""
url = f'https://api.github.com/repos/{repo}/git/trees/{branch}?recursive=1'
return self.query(url)
def _correct_folder(folder: str) -> str:
"""Ensures the folder follows a standard.
Pathlib.parent in the root folder results in '.', whereas in other places
we should use '' for the root folder. This function makes sure the root
folder is always the empty string.
Args:
folder: the folder to be corrected.
Returns:
The corrected folder.
"""
if folder == '.':
return ''
return folder
def _get_parent_folder(path: pathlib.PurePosixPath) -> str:
return _correct_folder(os.fspath(path.parent))
@dataclasses.dataclass(frozen=True)
class _GithubElement:
"""Representation of an element in a Github tree (a file or folder).
Attributes:
parent_folder: the folder in which this element resides.
name: the name of this element, e.g. the file name or the folder name.
is_folder: whether this element is a folder or not.
"""
parent_folder: str
name: str
is_folder: bool
@classmethod
def from_path(cls, path: pathlib.PurePosixPath,
is_folder: bool) -> '_GithubElement':
parent_folder = _get_parent_folder(path)
name = path.name
return cls(parent_folder=parent_folder, name=name, is_folder=is_folder)
@dataclasses.dataclass(frozen=True)
class _GithubTree:
"""A Github tree of a repository."""
files_per_folder: Mapping[str, Set[_GithubElement]]
def is_folder(self, path: str) -> bool:
return _correct_folder(path) in self.files_per_folder
def is_file(self, path: pathlib.PurePosixPath) -> bool:
parent_folder = _get_parent_folder(path)
files = self.files_per_folder.get(parent_folder)
if not files:
return False
file = _GithubElement(
parent_folder=parent_folder, name=path.name, is_folder=False)
return file in files
@classmethod
def from_json(cls, value) -> '_GithubTree':
"""Parses a GithubTree from the given JSON."""
if not isinstance(value, dict) or 'tree' not in value:
raise ValueError(f'Github API response not supported: {value}')
files_per_folder: MutableMapping[str, Set[_GithubElement]] = {}
for element in value['tree']:
github_element = _GithubElement.from_path(
path=pathlib.PurePosixPath(element['path']),
is_folder=(element['type'] == 'tree'))
if element['type'] in {'blob', 'tree'}:
files_per_folder.setdefault(github_element.parent_folder, set())
files_per_folder[github_element.parent_folder].add(github_element)
return _GithubTree(files_per_folder=files_per_folder)
@staticmethod
@functools.lru_cache(maxsize=None)
def from_cache(repo: str, branch: str) -> '_GithubTree':
"""Factory which caches the entire Github tree."""
tree_json = GithubApi().query_tree(repo, branch)
# If the tree is truncated, then we'll need a more sophisticated method to
# retrieve the whole tree. Since this is currently not supported, it raises
# an exception.
assert not tree_json.get('truncated', False)
return _GithubTree.from_json(tree_json)
@dataclasses.dataclass(frozen=True, eq=True)
class _PathMetadata:
"""Github metadata of a file or directory."""
path: str
repo: str # e.g. `tensorflow/datasets`
branch: str # e.g. `master`
subpath: str # e.g. 'core/__init__.py'
@classmethod
def from_path(cls, path: str) -> '_PathMetadata':
repo, branch, subpath = _parse_github_path(path)
return cls(path=path, repo=repo, branch=branch, subpath=subpath)
@utils.register_pathlike_cls(_URI_PREFIX)
class GithubPath(pathlib.PurePosixPath):
"""`pathlib.Path` like object for manipulating Github paths.
Example:
```
path = GithubPath.from_repo('tensorflow/datasets')
path = path / 'docs' / 'catalog'
assert path.is_dir()
datasets = [
p.name for p in path.iterdir() if p.match('*.md')
]
path = GithubPath('github://tensorflow/datasets/tree/master/docs/README.md')
assert path.subpath == 'docs/README.md'
assert path.repo == 'tensorflow/datasets'
assert path.branch == 'master'
```
"""
def __new__(cls, *parts: utils.PathLike) -> 'GithubPath':
full_path = '/'.join(os.fspath(p) for p in parts)
_parse_github_path(full_path)
return super().__new__(cls, full_path.replace(_URI_PREFIX, '/github/', 1))
@utils.memoized_property
def _path_str(self) -> str:
return posixpath.join(_URI_PREFIX, *self.parts[2:])
def __fspath__(self) -> str:
return self._path_str
def __str__(self) -> str: # pylint: disable=invalid-str-returned
return self._path_str
@classmethod
def from_repo(cls, repo: str, branch: str = 'master') -> 'GithubPath':
"""Factory to creates a GithubPath from a repo name.
Args:
repo: Repo name (e.g. `tensorflow/datasets`)
branch: Branch name (e.g. `master`, `v1.2.0`, `0d240e8b85c`). Defaults to
`master`.
Returns:
github_path: The root directory of the repository at the given branch.
"""
return cls(f'github://{repo}/tree/{branch}')
@utils.memoized_property
def _metadata(self) -> _PathMetadata:
return _PathMetadata.from_path(os.fspath(self))
@property
def subpath(self) -> str:
"""The inner path (e.g. `core/__init__.py`)."""
return self._metadata.subpath
@property
def repo(self) -> str:
"""The repository identifier (e.g. `tensorflow/datasets`)."""
return self._metadata.repo
@property
def branch(self) -> str:
"""The branch (e.g. `master`, `v2`, `43bbad116df`,...)."""
return self._metadata.branch
@property
def github_tree(self) -> _GithubTree:
return _GithubTree.from_cache(self.repo, self.branch)
def as_raw_url(self) -> str:
"""Returns the raw content url (https://raw.githubusercontent.com)."""
return ('https://raw.githubusercontent.com/'
f'{self.repo}/{self.branch}/{self.subpath}')
def as_human_friendly_url(self) -> str:
"""Returns the human friendly url."""
return f'https://github.com/{self.repo}/blob/{self.branch}/{self.subpath}'
def iterdir(self) -> Iterator['GithubPath']:
"""Yields the sub-paths."""
if not self.is_dir():
raise NotADirectoryError(f'{self.subpath} is not a directory.')
for filename in self.github_tree.files_per_folder[self.subpath]:
yield self / filename.name
def is_dir(self) -> bool:
"""Returns True if the path is a directory or submodule."""
return self.github_tree.is_folder(self.subpath)
def is_file(self) -> bool:
"""Returns True if the path is a file."""
return self.github_tree.is_file(pathlib.PurePosixPath(self.subpath))
def exists(self) -> bool:
"""Returns True if the path exists."""
return self.is_dir() or self.is_file()
def read_bytes(self) -> bytes:
"""Returns the file content as bytes."""
# As the content is fetched during the Github API calls, we could cache it
# and return it directly here, rather than using an additional query.
# However this might have significant memory impact if many `GithubPath`
# are used, so would require some additional cleanup (weakref ?).
# Using raw_url doesn't count against the API call quota and should work with
# arbitrarily sized files.
url = self.as_raw_url()
return get_content(url)
def read_text(self, encoding: Optional[str] = None) -> str:
"""Returns the file content as string."""
return self.read_bytes().decode(encoding=encoding or 'utf-8')
def copy(
self,
dst: utils.PathLike,
overwrite: bool = False,
) -> utils.ReadWritePath:
"""Copy the current file to the given destination.
Args:
dst: Target file. It can be any PathLike compatible path (e.g. `gs://...`)
overwrite: Whether the file should be overwritten or not
Returns:
The newly created file.
Raises:
FileExistsError: If `overwrite` is false and destination exists.
"""
dst = utils.as_path(dst)
if not overwrite and dst.exists():
raise FileExistsError(f'Cannot copy {self}. Destination {dst} exists.')
# Otherwise, copy src to dst
dst.write_bytes(self.read_bytes())
return dst
def _parse_github_path(path: str) -> Tuple[str, str, str]:
"""Parse the absolute github path.
Args:
path: The full github path.
Returns:
repo: The repository identifier.
branch: Repository branch.
subpath: The inner path.
Raises:
ValueError: If the path is invalid
"""
err_msg = (f'Invalid github path: {path}. Expected format: '
'`github://<owner>/<name>/tree/<branch>[/<path>]`.')
if not path.startswith(_URI_PREFIX):
raise ValueError(err_msg)
if path.endswith('/'):
raise ValueError(err_msg + ' Trailing `/` not supported.')
parts = path[len(_URI_PREFIX):].split('/')
if len(parts) < 4:
raise ValueError(err_msg)
# 'tensorflow', 'datasets', 'tree', 'master', ...
owner, repo, tree, branch, *subpath = parts
if tree != 'tree':
raise ValueError(err_msg + ' `/blob/` isn\'t accepted. Only `/tree/`.')
return f'{owner}/{repo}', branch, '/'.join(subpath)
|
commandstats
|
Shows command stats.
Use a negative number for bottom instead of top.
This is only for the current session.
|
from discord.ext import commands, tasks
from collections import Counter, defaultdict
from .utils import checks, db, time, formats
from .utils.paginator import CannotPaginate
import pkg_resources
import logging
import discord
import textwrap
import datetime
import traceback
import itertools
import typing
import asyncpg
import asyncio
import pygit2
import psutil
import json
import os
import re
import io
import gc
log = logging.getLogger(__name__)
LOGGING_CHANNEL = 309632009427222529
class GatewayHandler(logging.Handler):
def __init__(self, cog):
self.cog = cog
super().__init__(logging.INFO)
def filter(self, record):
return record.name == 'discord.gateway' or 'Shard ID' in record.msg or 'Websocket closed ' in record.msg
def emit(self, record):
self.cog.add_record(record)
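# GatewayHandler is attached to the root logger in setup() below: filter()
# keeps only gateway-related records and emit() hands them to the Stats cog,
# which relays them to a webhook via its gateway queue.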
class Commands(db.Table):
id = db.PrimaryKeyColumn()
guild_id = db.Column(db.Integer(big=True), index=True)
channel_id = db.Column(db.Integer(big=True))
author_id = db.Column(db.Integer(big=True), index=True)
used = db.Column(db.Datetime, index=True)
prefix = db.Column(db.String)
command = db.Column(db.String, index=True)
failed = db.Column(db.Boolean, index=True)
_INVITE_REGEX = re.compile(r'(?:https?:\/\/)?discord(?:\.gg|\.com|app\.com\/invite)?\/[A-Za-z0-9]+')
def censor_invite(obj, *, _regex=_INVITE_REGEX):
return _regex.sub('[censored-invite]', str(obj))
def hex_value(arg):
return int(arg, base=16)
def object_at(addr):
for o in gc.get_objects():
if id(o) == addr:
return o
return None
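# hex_value and object_at back the owner-only debug_task command below: a
# task's id() is supplied in hex and resolved back to the live object by
# scanning the garbage collector's tracked objects.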
class Stats(commands.Cog):
"""Bot usage statistics."""
def __init__(self, bot):
self.bot = bot
self.process = psutil.Process()
self._batch_lock = asyncio.Lock(loop=bot.loop)
self._data_batch = []
self.bulk_insert_loop.add_exception_type(asyncpg.PostgresConnectionError)
self.bulk_insert_loop.start()
self._gateway_queue = asyncio.Queue(loop=bot.loop)
self.gateway_worker.start()
# This is a datetime list
self._resumes = []
# shard_id: List[datetime]
self._identifies = defaultdict(list)
def _clear_gateway_data(self):
one_week_ago = datetime.datetime.utcnow() - datetime.timedelta(days=7)
to_remove = [index for index, dt in enumerate(self._resumes) if dt < one_week_ago]
for index in reversed(to_remove):
del self._resumes[index]
for shard_id, dates in self._identifies.items():
to_remove = [index for index, dt in enumerate(dates) if dt < one_week_ago]
for index in reversed(to_remove):
del dates[index]
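# Only the last seven days of RESUME/IDENTIFY timestamps are retained; the
# bothealth command later summarises the most recent 24 hours of this data.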
async def bulk_insert(self):
query = """INSERT INTO commands (guild_id, channel_id, author_id, used, prefix, command, failed)
SELECT x.guild, x.channel, x.author, x.used, x.prefix, x.command, x.failed
FROM jsonb_to_recordset($1::jsonb) AS
x(guild BIGINT, channel BIGINT, author BIGINT, used TIMESTAMP, prefix TEXT, command TEXT, failed BOOLEAN)
"""
if self._data_batch:
await self.bot.pool.execute(query, self._data_batch)
total = len(self._data_batch)
if total > 1:
log.info('Registered %s commands to the database.', total)
self._data_batch.clear()
def cog_unload(self):
self.bulk_insert_loop.stop()
self.gateway_worker.cancel()
@tasks.loop(seconds=10.0)
async def bulk_insert_loop(self):
async with self._batch_lock:
await self.bulk_insert()
@tasks.loop(seconds=0.0)
async def gateway_worker(self):
record = await self._gateway_queue.get()
await self.notify_gateway_status(record)
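# With seconds=0.0 this loop acts as a continuously running consumer: each
# iteration simply blocks on the queue until the next gateway record arrives.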
async def register_command(self, ctx):
if ctx.command is None:
return
command = ctx.command.qualified_name
self.bot.command_stats[command] += 1
message = ctx.message
destination = None
if ctx.guild is None:
destination = 'Private Message'
guild_id = None
else:
destination = f'#{message.channel} ({message.guild})'
guild_id = ctx.guild.id
log.info(f'{message.created_at}: {message.author} in {destination}: {message.content}')
async with self._batch_lock:
self._data_batch.append({
'guild': guild_id,
'channel': ctx.channel.id,
'author': ctx.author.id,
'used': message.created_at.isoformat(),
'prefix': ctx.prefix,
'command': command,
'failed': ctx.command_failed,
})
@commands.Cog.listener()
async def on_command_completion(self, ctx):
await self.register_command(ctx)
@commands.Cog.listener()
async def on_socket_response(self, msg):
self.bot.socket_stats[msg.get('t')] += 1
@property
def webhook(self):
wh_id, wh_token = self.bot.config.stat_webhook
hook = discord.Webhook.partial(id=wh_id, token=wh_token, adapter=discord.AsyncWebhookAdapter(self.bot.session))
return hook
async def log_error(self, *, ctx=None, extra=None):
e = discord.Embed(title='Error', colour=0xdd5f53)
e.description = f'```py\n{traceback.format_exc()}\n```'
e.add_field(name='Extra', value=extra, inline=False)
e.timestamp = datetime.datetime.utcnow()
if ctx is not None:
fmt = '{0} (ID: {0.id})'
author = fmt.format(ctx.author)
channel = fmt.format(ctx.channel)
guild = 'None' if ctx.guild is None else fmt.format(ctx.guild)
e.add_field(name='Author', value=author)
e.add_field(name='Channel', value=channel)
e.add_field(name='Guild', value=guild)
await self.webhook.send(embed=e)
# MASKED: commandstats function (lines 181-200)
@commands.command(hidden=True)
async def socketstats(self, ctx):
delta = datetime.datetime.utcnow() - self.bot.uptime
minutes = delta.total_seconds() / 60
total = sum(self.bot.socket_stats.values())
cpm = total / minutes
await ctx.send(f'{total} socket events observed ({cpm:.2f}/minute):\n{self.bot.socket_stats}')
def get_bot_uptime(self, *, brief=False):
return time.human_timedelta(self.bot.uptime, accuracy=None, brief=brief, suffix=False)
@commands.command()
async def uptime(self, ctx):
"""Tells you how long the bot has been up for."""
await ctx.send(f'Uptime: **{self.get_bot_uptime()}**')
def format_commit(self, commit):
short, _, _ = commit.message.partition('\n')
short_sha2 = commit.hex[0:6]
commit_tz = datetime.timezone(datetime.timedelta(minutes=commit.commit_time_offset))
commit_time = datetime.datetime.fromtimestamp(commit.commit_time).replace(tzinfo=commit_tz)
# [`hash`](url) message (offset)
offset = time.human_timedelta(commit_time.astimezone(datetime.timezone.utc).replace(tzinfo=None), accuracy=1)
return f'[`{short_sha2}`](https://github.com/Rapptz/RoboDanny/commit/{commit.hex}) {short} ({offset})'
def get_last_commits(self, count=3):
repo = pygit2.Repository('.git')
commits = list(itertools.islice(repo.walk(repo.head.target, pygit2.GIT_SORT_TOPOLOGICAL), count))
return '\n'.join(self.format_commit(c) for c in commits)
@commands.command()
async def about(self, ctx):
"""Tells you information about the bot itself."""
revision = self.get_last_commits()
embed = discord.Embed(description='Latest Changes:\n' + revision)
embed.title = 'Official Bot Server Invite'
embed.url = 'https://discord.gg/DWEaqMy'
embed.colour = discord.Colour.blurple()
owner = self.bot.get_user(self.bot.owner_id)
embed.set_author(name=str(owner), icon_url=owner.avatar_url)
# statistics
total_members = 0
total_online = 0
offline = discord.Status.offline
for member in self.bot.get_all_members():
total_members += 1
if member.status is not offline:
total_online += 1
total_unique = len(self.bot.users)
text = 0
voice = 0
guilds = 0
for guild in self.bot.guilds:
guilds += 1
for channel in guild.channels:
if isinstance(channel, discord.TextChannel):
text += 1
elif isinstance(channel, discord.VoiceChannel):
voice += 1
embed.add_field(name='Members', value=f'{total_members} total\n{total_unique} unique\n{total_online} unique online')
embed.add_field(name='Channels', value=f'{text + voice} total\n{text} text\n{voice} voice')
memory_usage = self.process.memory_full_info().uss / 1024**2
cpu_usage = self.process.cpu_percent() / psutil.cpu_count()
embed.add_field(name='Process', value=f'{memory_usage:.2f} MiB\n{cpu_usage:.2f}% CPU')
version = pkg_resources.get_distribution('discord.py').version
embed.add_field(name='Guilds', value=guilds)
embed.add_field(name='Commands Run', value=sum(self.bot.command_stats.values()))
embed.add_field(name='Uptime', value=self.get_bot_uptime(brief=True))
embed.set_footer(text=f'Made with discord.py v{version}', icon_url='http://i.imgur.com/5BFecvA.png')
embed.timestamp = datetime.datetime.utcnow()
await ctx.send(embed=embed)
def censor_object(self, obj):
if not isinstance(obj, str) and obj.id in self.bot.blacklist:
return '[censored]'
return censor_invite(obj)
async def show_guild_stats(self, ctx):
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
embed = discord.Embed(title='Server Command Stats', colour=discord.Colour.blurple())
# total command uses
query = "SELECT COUNT(*), MIN(used) FROM commands WHERE guild_id=$1;"
count = await ctx.db.fetchrow(query, ctx.guild.id)
embed.description = f'{count[0]} commands used.'
embed.set_footer(text='Tracking command usage since').timestamp = count[1] or datetime.datetime.utcnow()
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands'
embed.add_field(name='Top Commands', value=value, inline=True)
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1
AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands.'
embed.add_field(name='Top Commands Today', value=value, inline=True)
embed.add_field(name='\u200b', value='\u200b', inline=True)
query = """SELECT author_id,
COUNT(*) AS "uses"
FROM commands
WHERE guild_id=$1
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: <@!{author_id}> ({uses} bot uses)'
for (index, (author_id, uses)) in enumerate(records)) or 'No bot users.'
embed.add_field(name='Top Command Users', value=value, inline=True)
query = """SELECT author_id,
COUNT(*) AS "uses"
FROM commands
WHERE guild_id=$1
AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: <@!{author_id}> ({uses} bot uses)'
for (index, (author_id, uses)) in enumerate(records)) or 'No command users.'
embed.add_field(name='Top Command Users Today', value=value, inline=True)
await ctx.send(embed=embed)
async def show_member_stats(self, ctx, member):
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
embed = discord.Embed(title='Command Stats', colour=member.colour)
embed.set_author(name=str(member), icon_url=member.avatar_url)
# total command uses
query = "SELECT COUNT(*), MIN(used) FROM commands WHERE guild_id=$1 AND author_id=$2;"
count = await ctx.db.fetchrow(query, ctx.guild.id, member.id)
embed.description = f'{count[0]} commands used.'
embed.set_footer(text='First command used').timestamp = count[1] or datetime.datetime.utcnow()
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1 AND author_id=$2
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id, member.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands'
embed.add_field(name='Most Used Commands', value=value, inline=False)
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1
AND author_id=$2
AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id, member.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands'
embed.add_field(name='Most Used Commands Today', value=value, inline=False)
await ctx.send(embed=embed)
@commands.group(invoke_without_command=True)
@commands.guild_only()
@commands.cooldown(1, 30.0, type=commands.BucketType.member)
async def stats(self, ctx, *, member: discord.Member = None):
"""Tells you command usage stats for the server or a member."""
async with ctx.typing():
if member is None:
await self.show_guild_stats(ctx)
else:
await self.show_member_stats(ctx, member)
@stats.command(name='global')
@commands.is_owner()
async def stats_global(self, ctx):
"""Global all time command statistics."""
query = "SELECT COUNT(*) FROM commands;"
total = await ctx.db.fetchrow(query)
e = discord.Embed(title='Command Stats', colour=discord.Colour.blurple())
e.description = f'{total[0]} commands used.'
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
query = """SELECT command, COUNT(*) AS "uses"
FROM commands
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))
e.add_field(name='Top Commands', value=value, inline=False)
query = """SELECT guild_id, COUNT(*) AS "uses"
FROM commands
GROUP BY guild_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (guild_id, uses)) in enumerate(records):
if guild_id is None:
guild = 'Private Message'
else:
guild = self.censor_object(self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {guild} ({uses} uses)')
e.add_field(name='Top Guilds', value='\n'.join(value), inline=False)
query = """SELECT author_id, COUNT(*) AS "uses"
FROM commands
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (author_id, uses)) in enumerate(records):
user = self.censor_object(self.bot.get_user(author_id) or f'<Unknown {author_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {user} ({uses} uses)')
e.add_field(name='Top Users', value='\n'.join(value), inline=False)
await ctx.send(embed=e)
@stats.command(name='today')
@commands.is_owner()
async def stats_today(self, ctx):
"""Global command statistics for the day."""
query = "SELECT failed, COUNT(*) FROM commands WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day') GROUP BY failed;"
total = await ctx.db.fetch(query)
failed = 0
success = 0
question = 0
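# 'question' counts rows whose `failed` column is NULL, i.e. commands whose
# outcome was never recorded.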
for state, count in total:
if state is False:
success += count
elif state is True:
failed += count
else:
question += count
e = discord.Embed(title='Last 24 Hour Command Stats', colour=discord.Colour.blurple())
e.description = f'{failed + success + question} commands used today. ' \
f'({success} succeeded, {failed} failed, {question} unknown)'
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
query = """SELECT command, COUNT(*) AS "uses"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))
e.add_field(name='Top Commands', value=value, inline=False)
query = """SELECT guild_id, COUNT(*) AS "uses"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY guild_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (guild_id, uses)) in enumerate(records):
if guild_id is None:
guild = 'Private Message'
else:
guild = self.censor_object(self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {guild} ({uses} uses)')
e.add_field(name='Top Guilds', value='\n'.join(value), inline=False)
query = """SELECT author_id, COUNT(*) AS "uses"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (author_id, uses)) in enumerate(records):
user = self.censor_object(self.bot.get_user(author_id) or f'<Unknown {author_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {user} ({uses} uses)')
e.add_field(name='Top Users', value='\n'.join(value), inline=False)
await ctx.send(embed=e)
async def send_guild_stats(self, e, guild):
e.add_field(name='Name', value=guild.name)
e.add_field(name='ID', value=guild.id)
e.add_field(name='Shard ID', value=guild.shard_id or 'N/A')
e.add_field(name='Owner', value=f'{guild.owner} (ID: {guild.owner.id})')
bots = sum(m.bot for m in guild.members)
total = guild.member_count
online = sum(m.status is discord.Status.online for m in guild.members)
e.add_field(name='Members', value=str(total))
e.add_field(name='Bots', value=f'{bots} ({bots/total:.2%})')
e.add_field(name='Online', value=f'{online} ({online/total:.2%})')
if guild.icon:
e.set_thumbnail(url=guild.icon_url)
if guild.me:
e.timestamp = guild.me.joined_at
await self.webhook.send(embed=e)
@stats_today.before_invoke
@stats_global.before_invoke
async def before_stats_invoke(self, ctx):
await ctx.trigger_typing()
@commands.Cog.listener()
async def on_guild_join(self, guild):
e = discord.Embed(colour=0x53dda4, title='New Guild') # green colour
await self.send_guild_stats(e, guild)
@commands.Cog.listener()
async def on_guild_remove(self, guild):
e = discord.Embed(colour=0xdd5f53, title='Left Guild') # red colour
await self.send_guild_stats(e, guild)
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
await self.register_command(ctx)
if not isinstance(error, (commands.CommandInvokeError, commands.ConversionError)):
return
error = error.original
if isinstance(error, (discord.Forbidden, discord.NotFound, CannotPaginate)):
return
e = discord.Embed(title='Command Error', colour=0xcc3366)
e.add_field(name='Name', value=ctx.command.qualified_name)
e.add_field(name='Author', value=f'{ctx.author} (ID: {ctx.author.id})')
fmt = f'Channel: {ctx.channel} (ID: {ctx.channel.id})'
if ctx.guild:
fmt = f'{fmt}\nGuild: {ctx.guild} (ID: {ctx.guild.id})'
e.add_field(name='Location', value=fmt, inline=False)
e.add_field(name='Content', value=textwrap.shorten(ctx.message.content, width=512))
exc = ''.join(traceback.format_exception(type(error), error, error.__traceback__, chain=False))
e.description = f'```py\n{exc}\n```'
e.timestamp = datetime.datetime.utcnow()
await self.webhook.send(embed=e)
@commands.Cog.listener()
async def on_socket_raw_send(self, data):
# kind of weird way to check if we're sending
# IDENTIFY or RESUME
if '"op":2' not in data and '"op":6' not in data:
return
back_to_json = json.loads(data)
if back_to_json['op'] == 2:
payload = back_to_json['d']
inner_shard = payload.get('shard', [0])
self._identifies[inner_shard[0]].append(datetime.datetime.utcnow())
else:
self._resumes.append(datetime.datetime.utcnow())
# don't want to permanently grow memory
self._clear_gateway_data()
def add_record(self, record):
# if self.bot.config.debug:
# return
self._gateway_queue.put_nowait(record)
async def notify_gateway_status(self, record):
attributes = {
'INFO': '\N{INFORMATION SOURCE}',
'WARNING': '\N{WARNING SIGN}'
}
emoji = attributes.get(record.levelname, '\N{CROSS MARK}')
dt = datetime.datetime.utcfromtimestamp(record.created)
msg = f'{emoji} `[{dt:%Y-%m-%d %H:%M:%S}] {record.message}`'
await self.webhook.send(msg, username='Gateway', avatar_url='https://i.imgur.com/4PnCKB3.png')
@commands.command(hidden=True)
@commands.is_owner()
async def bothealth(self, ctx):
"""Various bot health monitoring tools."""
# This uses a lot of private methods because there is no
# clean way of doing this otherwise.
HEALTHY = discord.Colour(value=0x43B581)
UNHEALTHY = discord.Colour(value=0xF04947)
WARNING = discord.Colour(value=0xF09E47)
total_warnings = 0
embed = discord.Embed(title='Bot Health Report', colour=HEALTHY)
# Check the connection pool health.
pool = self.bot.pool
total_waiting = len(pool._queue._getters)
current_generation = pool._generation
description = [
f'Total `Pool.acquire` Waiters: {total_waiting}',
f'Current Pool Generation: {current_generation}',
f'Connections In Use: {len(pool._holders) - pool._queue.qsize()}'
]
questionable_connections = 0
connection_value = []
for index, holder in enumerate(pool._holders, start=1):
generation = holder._generation
in_use = holder._in_use is not None
is_closed = holder._con is None or holder._con.is_closed()
display = f'gen={holder._generation} in_use={in_use} closed={is_closed}'
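# A holder counts as questionable if its connection is currently checked out
# or belongs to an older pool generation (i.e. it predates a pool reset).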
questionable_connections += any((in_use, generation != current_generation))
connection_value.append(f'<Holder i={index} {display}>')
joined_value = '\n'.join(connection_value)
embed.add_field(name='Connections', value=f'```py\n{joined_value}\n```', inline=False)
spam_control = self.bot.spam_control
being_spammed = [
str(key) for key, value in spam_control._cache.items()
if value._tokens == 0
]
description.append(f'Current Spammers: {", ".join(being_spammed) if being_spammed else "None"}')
description.append(f'Questionable Connections: {questionable_connections}')
total_warnings += questionable_connections
if being_spammed:
embed.colour = WARNING
total_warnings += 1
try:
task_retriever = asyncio.Task.all_tasks
except AttributeError:
# future proofing for 3.9 I guess
task_retriever = asyncio.all_tasks
all_tasks = task_retriever(loop=self.bot.loop)
event_tasks = [
t for t in all_tasks
if 'Client._run_event' in repr(t) and not t.done()
]
cogs_directory = os.path.dirname(__file__)
tasks_directory = os.path.join('discord', 'ext', 'tasks', '__init__.py')
inner_tasks = [
t for t in all_tasks
if cogs_directory in repr(t) or tasks_directory in repr(t)
]
bad_inner_tasks = ", ".join(hex(id(t)) for t in inner_tasks if t.done() and t._exception is not None)
total_warnings += bool(bad_inner_tasks)
embed.add_field(name='Inner Tasks', value=f'Total: {len(inner_tasks)}\nFailed: {bad_inner_tasks or "None"}')
embed.add_field(name='Events Waiting', value=f'Total: {len(event_tasks)}', inline=False)
command_waiters = len(self._data_batch)
is_locked = self._batch_lock.locked()
description.append(f'Commands Waiting: {command_waiters}, Batch Locked: {is_locked}')
# RESUME/IDENTIFY data
yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
total_resumes = sum(1 for dt in self._resumes if dt > yesterday)
identifies = {
shard_id: sum(1 for dt in dates if dt > yesterday)
for shard_id, dates in self._identifies.items()
}
absolute_total_identifies = sum(identifies.values())
resume_info_builder = [
f'Total RESUMEs: {total_resumes}',
f'Total IDENTIFYs: {absolute_total_identifies}'
]
for shard_id, total in identifies.items():
resume_info_builder.append(f'Shard ID {shard_id} IDENTIFYs: {total}')
if absolute_total_identifies >= (len(self.bot.shards) * 5):
total_warnings += 1
embed.colour = WARNING
embed.add_field(name='Gateway (last 24 hours)', value='\n'.join(resume_info_builder), inline=False)
memory_usage = self.process.memory_full_info().uss / 1024**2
cpu_usage = self.process.cpu_percent() / psutil.cpu_count()
embed.add_field(name='Process', value=f'{memory_usage:.2f} MiB\n{cpu_usage:.2f}% CPU', inline=False)
global_rate_limit = not self.bot.http._global_over.is_set()
description.append(f'Global Rate Limit: {global_rate_limit}')
if command_waiters >= 8:
total_warnings += 1
embed.colour = WARNING
if global_rate_limit or total_warnings >= 9:
embed.colour = UNHEALTHY
embed.set_footer(text=f'{total_warnings} warning(s)')
embed.description = '\n'.join(description)
await ctx.send(embed=embed)
@commands.command(hidden=True, aliases=['cancel_task'])
@commands.is_owner()
async def debug_task(self, ctx, memory_id: hex_value):
"""Debug a task by a memory location."""
task = object_at(memory_id)
if task is None or not isinstance(task, asyncio.Task):
return await ctx.send(f'Could not find Task object at {hex(memory_id)}.')
if ctx.invoked_with == 'cancel_task':
task.cancel()
return await ctx.send(f'Cancelled task object {task!r}.')
paginator = commands.Paginator(prefix='```py')
fp = io.StringIO()
frames = len(task.get_stack())
paginator.add_line(f'# Total Frames: {frames}')
task.print_stack(file=fp)
for line in fp.getvalue().splitlines():
paginator.add_line(line)
for page in paginator.pages:
await ctx.send(page)
async def tabulate_query(self, ctx, query, *args):
records = await ctx.db.fetch(query, *args)
if len(records) == 0:
return await ctx.send('No results found.')
headers = list(records[0].keys())
table = formats.TabularData()
table.set_columns(headers)
table.add_rows(list(r.values()) for r in records)
render = table.render()
fmt = f'```\n{render}\n```'
if len(fmt) > 2000:
fp = io.BytesIO(fmt.encode('utf-8'))
await ctx.send('Too many results...', file=discord.File(fp, 'results.txt'))
else:
await ctx.send(fmt)
@commands.group(hidden=True, invoke_without_command=True)
@commands.is_owner()
async def command_history(self, ctx):
"""Command history."""
query = """SELECT
CASE failed
WHEN TRUE THEN command || ' [!]'
ELSE command
END AS "command",
to_char(used, 'Mon DD HH12:MI:SS AM') AS "invoked",
author_id,
guild_id
FROM commands
ORDER BY used DESC
LIMIT 15;
"""
await self.tabulate_query(ctx, query)
@command_history.command(name='for')
@commands.is_owner()
async def command_history_for(self, ctx, days: typing.Optional[int] = 7, *, command: str):
"""Command history for a command."""
query = """SELECT *, t.success + t.failed AS "total"
FROM (
SELECT guild_id,
SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
FROM commands
WHERE command=$1
AND used > (CURRENT_TIMESTAMP - $2::interval)
GROUP BY guild_id
) AS t
ORDER BY "total" DESC
LIMIT 30;
"""
await self.tabulate_query(ctx, query, command, datetime.timedelta(days=days))
@command_history.command(name='guild', aliases=['server'])
@commands.is_owner()
async def command_history_guild(self, ctx, guild_id: int):
"""Command history for a guild."""
query = """SELECT
CASE failed
WHEN TRUE THEN command || ' [!]'
ELSE command
END AS "command",
channel_id,
author_id,
used
FROM commands
WHERE guild_id=$1
ORDER BY used DESC
LIMIT 15;
"""
await self.tabulate_query(ctx, query, guild_id)
@command_history.command(name='user', aliases=['member'])
@commands.is_owner()
async def command_history_user(self, ctx, user_id: int):
"""Command history for a user."""
query = """SELECT
CASE failed
WHEN TRUE THEN command || ' [!]'
ELSE command
END AS "command",
guild_id,
used
FROM commands
WHERE author_id=$1
ORDER BY used DESC
LIMIT 20;
"""
await self.tabulate_query(ctx, query, user_id)
@command_history.command(name='log')
@commands.is_owner()
async def command_history_log(self, ctx, days=7):
"""Command history log for the last N days."""
query = """SELECT command, COUNT(*)
FROM commands
WHERE used > (CURRENT_TIMESTAMP - $1::interval)
GROUP BY command
ORDER BY 2 DESC
"""
all_commands = {
c.qualified_name: 0
for c in self.bot.walk_commands()
}
records = await ctx.db.fetch(query, datetime.timedelta(days=days))
for name, uses in records:
if name in all_commands:
all_commands[name] = uses
as_data = sorted(all_commands.items(), key=lambda t: t[1], reverse=True)
table = formats.TabularData()
table.set_columns(['Command', 'Uses'])
table.add_rows(tup for tup in as_data)
render = table.render()
embed = discord.Embed(title='Summary', colour=discord.Colour.green())
embed.set_footer(text='Since').timestamp = datetime.datetime.utcnow() - datetime.timedelta(days=days)
top_ten = '\n'.join(f'{command}: {uses}' for command, uses in records[:10])
bottom_ten = '\n'.join(f'{command}: {uses}' for command, uses in records[-10:])
embed.add_field(name='Top 10', value=top_ten)
embed.add_field(name='Bottom 10', value=bottom_ten)
unused = ', '.join(name for name, uses in as_data if uses == 0)
if len(unused) > 1024:
unused = 'Way too many...'
embed.add_field(name='Unused', value=unused, inline=False)
await ctx.send(embed=embed, file=discord.File(io.BytesIO(render.encode()), filename='full_results.txt'))
@command_history.command(name='cog')
@commands.is_owner()
async def command_history_cog(self, ctx, days: typing.Optional[int] = 7, *, cog: str = None):
"""Command history for a cog or grouped by a cog."""
interval = datetime.timedelta(days=days)
if cog is not None:
cog = self.bot.get_cog(cog)
if cog is None:
return await ctx.send(f'Unknown cog: {cog}')
query = """SELECT *, t.success + t.failed AS "total"
FROM (
SELECT command,
SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
FROM commands
WHERE command = any($1::text[])
AND used > (CURRENT_TIMESTAMP - $2::interval)
GROUP BY command
) AS t
ORDER BY "total" DESC
LIMIT 30;
"""
return await self.tabulate_query(ctx, query, [c.qualified_name for c in cog.walk_commands()], interval)
# A more manual query with a manual grouper.
query = """SELECT *, t.success + t.failed AS "total"
FROM (
SELECT command,
SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - $1::interval)
GROUP BY command
) AS t;
"""
class Count:
__slots__ = ('success', 'failed', 'total')
def __init__(self):
self.success = 0
self.failed = 0
self.total = 0
def add(self, record):
self.success += record['success']
self.failed += record['failed']
self.total += record['total']
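# The cog a command belongs to is not stored in the commands table, so the
# rows are re-grouped client-side by resolving each command name to its cog.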
data = defaultdict(Count)
records = await ctx.db.fetch(query, interval)
for record in records:
command = self.bot.get_command(record['command'])
if command is None or command.cog is None:
data['No Cog'].add(record)
else:
data[command.cog.qualified_name].add(record)
table = formats.TabularData()
table.set_columns(['Cog', 'Success', 'Failed', 'Total'])
data = sorted([
(cog, e.success, e.failed, e.total)
for cog, e in data.items()
], key=lambda t: t[-1], reverse=True)
table.add_rows(data)
render = table.render()
await ctx.safe_send(f'```\n{render}\n```')
old_on_error = commands.AutoShardedBot.on_error
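# setup() monkey-patches AutoShardedBot.on_error with the webhook-reporting
# version defined below; teardown() restores the original saved here.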
async def on_error(self, event, *args, **kwargs):
e = discord.Embed(title='Event Error', colour=0xa32952)
e.add_field(name='Event', value=event)
e.description = f'```py\n{traceback.format_exc()}\n```'
e.timestamp = datetime.datetime.utcnow()
args_str = ['```py']
for index, arg in enumerate(args):
args_str.append(f'[{index}]: {arg!r}')
args_str.append('```')
e.add_field(name='Args', value='\n'.join(args_str), inline=False)
hook = self.get_cog('Stats').webhook
try:
await hook.send(embed=e)
except:
pass
def setup(bot):
if not hasattr(bot, 'command_stats'):
bot.command_stats = Counter()
if not hasattr(bot, 'socket_stats'):
bot.socket_stats = Counter()
cog = Stats(bot)
bot.add_cog(cog)
bot._stats_cog_gateway_handler = handler = GatewayHandler(cog)
logging.getLogger().addHandler(handler)
commands.AutoShardedBot.on_error = on_error
def teardown(bot):
commands.AutoShardedBot.on_error = old_on_error
logging.getLogger().removeHandler(bot._stats_cog_gateway_handler)
del bot._stats_cog_gateway_handler
|
@commands.command(hidden=True)
@commands.is_owner()
async def commandstats(self, ctx, limit=20):
"""Shows command stats.
Use a negative number for bottom instead of top.
This is only for the current session.
"""
counter = self.bot.command_stats
width = len(max(counter, key=len))
total = sum(counter.values())
if limit > 0:
common = counter.most_common(limit)
else:
common = counter.most_common()[limit:]
output = '\n'.join(f'{k:<{width}}: {c}' for k, c in common)
await ctx.send(f'```\n{output}\n```')
| 181 | 200 |
from discord.ext import commands, tasks
from collections import Counter, defaultdict
from .utils import checks, db, time, formats
from .utils.paginator import CannotPaginate
import pkg_resources
import logging
import discord
import textwrap
import datetime
import traceback
import itertools
import typing
import asyncpg
import asyncio
import pygit2
import psutil
import json
import os
import re
import io
import gc
log = logging.getLogger(__name__)
LOGGING_CHANNEL = 309632009427222529
class GatewayHandler(logging.Handler):
def __init__(self, cog):
self.cog = cog
super().__init__(logging.INFO)
def filter(self, record):
return record.name == 'discord.gateway' or 'Shard ID' in record.msg or 'Websocket closed ' in record.msg
def emit(self, record):
self.cog.add_record(record)
class Commands(db.Table):
id = db.PrimaryKeyColumn()
guild_id = db.Column(db.Integer(big=True), index=True)
channel_id = db.Column(db.Integer(big=True))
author_id = db.Column(db.Integer(big=True), index=True)
used = db.Column(db.Datetime, index=True)
prefix = db.Column(db.String)
command = db.Column(db.String, index=True)
failed = db.Column(db.Boolean, index=True)
_INVITE_REGEX = re.compile(r'(?:https?:\/\/)?discord(?:\.gg|\.com|app\.com\/invite)?\/[A-Za-z0-9]+')
def censor_invite(obj, *, _regex=_INVITE_REGEX):
return _regex.sub('[censored-invite]', str(obj))
def hex_value(arg):
return int(arg, base=16)
def object_at(addr):
for o in gc.get_objects():
if id(o) == addr:
return o
return None
class Stats(commands.Cog):
"""Bot usage statistics."""
def __init__(self, bot):
self.bot = bot
self.process = psutil.Process()
self._batch_lock = asyncio.Lock(loop=bot.loop)
self._data_batch = []
self.bulk_insert_loop.add_exception_type(asyncpg.PostgresConnectionError)
self.bulk_insert_loop.start()
self._gateway_queue = asyncio.Queue(loop=bot.loop)
self.gateway_worker.start()
# This is a datetime list
self._resumes = []
# shard_id: List[datetime]
self._identifies = defaultdict(list)
def _clear_gateway_data(self):
one_week_ago = datetime.datetime.utcnow() - datetime.timedelta(days=7)
to_remove = [index for index, dt in enumerate(self._resumes) if dt < one_week_ago]
for index in reversed(to_remove):
del self._resumes[index]
for shard_id, dates in self._identifies.items():
to_remove = [index for index, dt in enumerate(dates) if dt < one_week_ago]
for index in reversed(to_remove):
del dates[index]
async def bulk_insert(self):
query = """INSERT INTO commands (guild_id, channel_id, author_id, used, prefix, command, failed)
SELECT x.guild, x.channel, x.author, x.used, x.prefix, x.command, x.failed
FROM jsonb_to_recordset($1::jsonb) AS
x(guild BIGINT, channel BIGINT, author BIGINT, used TIMESTAMP, prefix TEXT, command TEXT, failed BOOLEAN)
"""
if self._data_batch:
await self.bot.pool.execute(query, self._data_batch)
total = len(self._data_batch)
if total > 1:
log.info('Registered %s commands to the database.', total)
self._data_batch.clear()
def cog_unload(self):
self.bulk_insert_loop.stop()
self.gateway_worker.cancel()
@tasks.loop(seconds=10.0)
async def bulk_insert_loop(self):
async with self._batch_lock:
await self.bulk_insert()
@tasks.loop(seconds=0.0)
async def gateway_worker(self):
record = await self._gateway_queue.get()
await self.notify_gateway_status(record)
async def register_command(self, ctx):
if ctx.command is None:
return
command = ctx.command.qualified_name
self.bot.command_stats[command] += 1
message = ctx.message
destination = None
if ctx.guild is None:
destination = 'Private Message'
guild_id = None
else:
destination = f'#{message.channel} ({message.guild})'
guild_id = ctx.guild.id
log.info(f'{message.created_at}: {message.author} in {destination}: {message.content}')
async with self._batch_lock:
self._data_batch.append({
'guild': guild_id,
'channel': ctx.channel.id,
'author': ctx.author.id,
'used': message.created_at.isoformat(),
'prefix': ctx.prefix,
'command': command,
'failed': ctx.command_failed,
})
@commands.Cog.listener()
async def on_command_completion(self, ctx):
await self.register_command(ctx)
@commands.Cog.listener()
async def on_socket_response(self, msg):
self.bot.socket_stats[msg.get('t')] += 1
@property
def webhook(self):
wh_id, wh_token = self.bot.config.stat_webhook
hook = discord.Webhook.partial(id=wh_id, token=wh_token, adapter=discord.AsyncWebhookAdapter(self.bot.session))
return hook
async def log_error(self, *, ctx=None, extra=None):
e = discord.Embed(title='Error', colour=0xdd5f53)
e.description = f'```py\n{traceback.format_exc()}\n```'
e.add_field(name='Extra', value=extra, inline=False)
e.timestamp = datetime.datetime.utcnow()
if ctx is not None:
fmt = '{0} (ID: {0.id})'
author = fmt.format(ctx.author)
channel = fmt.format(ctx.channel)
guild = 'None' if ctx.guild is None else fmt.format(ctx.guild)
e.add_field(name='Author', value=author)
e.add_field(name='Channel', value=channel)
e.add_field(name='Guild', value=guild)
await self.webhook.send(embed=e)
@commands.command(hidden=True)
@commands.is_owner()
async def commandstats(self, ctx, limit=20):
"""Shows command stats.
Use a negative number for bottom instead of top.
This is only for the current session.
"""
counter = self.bot.command_stats
width = len(max(counter, key=len))
total = sum(counter.values())
if limit > 0:
common = counter.most_common(limit)
else:
common = counter.most_common()[limit:]
output = '\n'.join(f'{k:<{width}}: {c}' for k, c in common)
await ctx.send(f'```\n{output}\n```')
@commands.command(hidden=True)
async def socketstats(self, ctx):
delta = datetime.datetime.utcnow() - self.bot.uptime
minutes = delta.total_seconds() / 60
total = sum(self.bot.socket_stats.values())
cpm = total / minutes
await ctx.send(f'{total} socket events observed ({cpm:.2f}/minute):\n{self.bot.socket_stats}')
def get_bot_uptime(self, *, brief=False):
return time.human_timedelta(self.bot.uptime, accuracy=None, brief=brief, suffix=False)
@commands.command()
async def uptime(self, ctx):
"""Tells you how long the bot has been up for."""
await ctx.send(f'Uptime: **{self.get_bot_uptime()}**')
def format_commit(self, commit):
short, _, _ = commit.message.partition('\n')
short_sha2 = commit.hex[0:6]
commit_tz = datetime.timezone(datetime.timedelta(minutes=commit.commit_time_offset))
commit_time = datetime.datetime.fromtimestamp(commit.commit_time).replace(tzinfo=commit_tz)
# [`hash`](url) message (offset)
offset = time.human_timedelta(commit_time.astimezone(datetime.timezone.utc).replace(tzinfo=None), accuracy=1)
return f'[`{short_sha2}`](https://github.com/Rapptz/RoboDanny/commit/{commit.hex}) {short} ({offset})'
def get_last_commits(self, count=3):
repo = pygit2.Repository('.git')
commits = list(itertools.islice(repo.walk(repo.head.target, pygit2.GIT_SORT_TOPOLOGICAL), count))
return '\n'.join(self.format_commit(c) for c in commits)
@commands.command()
async def about(self, ctx):
"""Tells you information about the bot itself."""
revision = self.get_last_commits()
embed = discord.Embed(description='Latest Changes:\n' + revision)
embed.title = 'Official Bot Server Invite'
embed.url = 'https://discord.gg/DWEaqMy'
embed.colour = discord.Colour.blurple()
owner = self.bot.get_user(self.bot.owner_id)
embed.set_author(name=str(owner), icon_url=owner.avatar_url)
# statistics
total_members = 0
total_online = 0
offline = discord.Status.offline
for member in self.bot.get_all_members():
total_members += 1
if member.status is not offline:
total_online += 1
total_unique = len(self.bot.users)
text = 0
voice = 0
guilds = 0
for guild in self.bot.guilds:
guilds += 1
for channel in guild.channels:
if isinstance(channel, discord.TextChannel):
text += 1
elif isinstance(channel, discord.VoiceChannel):
voice += 1
embed.add_field(name='Members', value=f'{total_members} total\n{total_unique} unique\n{total_online} unique online')
embed.add_field(name='Channels', value=f'{text + voice} total\n{text} text\n{voice} voice')
memory_usage = self.process.memory_full_info().uss / 1024**2
cpu_usage = self.process.cpu_percent() / psutil.cpu_count()
embed.add_field(name='Process', value=f'{memory_usage:.2f} MiB\n{cpu_usage:.2f}% CPU')
version = pkg_resources.get_distribution('discord.py').version
embed.add_field(name='Guilds', value=guilds)
embed.add_field(name='Commands Run', value=sum(self.bot.command_stats.values()))
embed.add_field(name='Uptime', value=self.get_bot_uptime(brief=True))
embed.set_footer(text=f'Made with discord.py v{version}', icon_url='http://i.imgur.com/5BFecvA.png')
embed.timestamp = datetime.datetime.utcnow()
await ctx.send(embed=embed)
def censor_object(self, obj):
if not isinstance(obj, str) and obj.id in self.bot.blacklist:
return '[censored]'
return censor_invite(obj)
async def show_guild_stats(self, ctx):
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
embed = discord.Embed(title='Server Command Stats', colour=discord.Colour.blurple())
# total command uses
query = "SELECT COUNT(*), MIN(used) FROM commands WHERE guild_id=$1;"
count = await ctx.db.fetchrow(query, ctx.guild.id)
embed.description = f'{count[0]} commands used.'
embed.set_footer(text='Tracking command usage since').timestamp = count[1] or datetime.datetime.utcnow()
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands'
embed.add_field(name='Top Commands', value=value, inline=True)
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1
AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands.'
embed.add_field(name='Top Commands Today', value=value, inline=True)
embed.add_field(name='\u200b', value='\u200b', inline=True)
query = """SELECT author_id,
COUNT(*) AS "uses"
FROM commands
WHERE guild_id=$1
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: <@!{author_id}> ({uses} bot uses)'
for (index, (author_id, uses)) in enumerate(records)) or 'No bot users.'
embed.add_field(name='Top Command Users', value=value, inline=True)
query = """SELECT author_id,
COUNT(*) AS "uses"
FROM commands
WHERE guild_id=$1
AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: <@!{author_id}> ({uses} bot uses)'
for (index, (author_id, uses)) in enumerate(records)) or 'No command users.'
embed.add_field(name='Top Command Users Today', value=value, inline=True)
await ctx.send(embed=embed)
async def show_member_stats(self, ctx, member):
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
embed = discord.Embed(title='Command Stats', colour=member.colour)
embed.set_author(name=str(member), icon_url=member.avatar_url)
# total command uses
query = "SELECT COUNT(*), MIN(used) FROM commands WHERE guild_id=$1 AND author_id=$2;"
count = await ctx.db.fetchrow(query, ctx.guild.id, member.id)
embed.description = f'{count[0]} commands used.'
embed.set_footer(text='First command used').timestamp = count[1] or datetime.datetime.utcnow()
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1 AND author_id=$2
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id, member.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands'
embed.add_field(name='Most Used Commands', value=value, inline=False)
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1
AND author_id=$2
AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id, member.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands'
embed.add_field(name='Most Used Commands Today', value=value, inline=False)
await ctx.send(embed=embed)
@commands.group(invoke_without_command=True)
@commands.guild_only()
@commands.cooldown(1, 30.0, type=commands.BucketType.member)
async def stats(self, ctx, *, member: discord.Member = None):
"""Tells you command usage stats for the server or a member."""
async with ctx.typing():
if member is None:
await self.show_guild_stats(ctx)
else:
await self.show_member_stats(ctx, member)
@stats.command(name='global')
@commands.is_owner()
async def stats_global(self, ctx):
"""Global all time command statistics."""
query = "SELECT COUNT(*) FROM commands;"
total = await ctx.db.fetchrow(query)
e = discord.Embed(title='Command Stats', colour=discord.Colour.blurple())
e.description = f'{total[0]} commands used.'
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
query = """SELECT command, COUNT(*) AS "uses"
FROM commands
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))
e.add_field(name='Top Commands', value=value, inline=False)
query = """SELECT guild_id, COUNT(*) AS "uses"
FROM commands
GROUP BY guild_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (guild_id, uses)) in enumerate(records):
if guild_id is None:
guild = 'Private Message'
else:
guild = self.censor_object(self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {guild} ({uses} uses)')
e.add_field(name='Top Guilds', value='\n'.join(value), inline=False)
query = """SELECT author_id, COUNT(*) AS "uses"
FROM commands
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (author_id, uses)) in enumerate(records):
user = self.censor_object(self.bot.get_user(author_id) or f'<Unknown {author_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {user} ({uses} uses)')
e.add_field(name='Top Users', value='\n'.join(value), inline=False)
await ctx.send(embed=e)
@stats.command(name='today')
@commands.is_owner()
async def stats_today(self, ctx):
"""Global command statistics for the day."""
query = "SELECT failed, COUNT(*) FROM commands WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day') GROUP BY failed;"
total = await ctx.db.fetch(query)
failed = 0
success = 0
question = 0
for state, count in total:
if state is False:
success += count
elif state is True:
failed += count
else:
question += count
e = discord.Embed(title='Last 24 Hour Command Stats', colour=discord.Colour.blurple())
e.description = f'{failed + success + question} commands used today. ' \
f'({success} succeeded, {failed} failed, {question} unknown)'
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
query = """SELECT command, COUNT(*) AS "uses"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))
e.add_field(name='Top Commands', value=value, inline=False)
query = """SELECT guild_id, COUNT(*) AS "uses"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY guild_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (guild_id, uses)) in enumerate(records):
if guild_id is None:
guild = 'Private Message'
else:
guild = self.censor_object(self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {guild} ({uses} uses)')
e.add_field(name='Top Guilds', value='\n'.join(value), inline=False)
query = """SELECT author_id, COUNT(*) AS "uses"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (author_id, uses)) in enumerate(records):
user = self.censor_object(self.bot.get_user(author_id) or f'<Unknown {author_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {user} ({uses} uses)')
e.add_field(name='Top Users', value='\n'.join(value), inline=False)
await ctx.send(embed=e)
async def send_guild_stats(self, e, guild):
e.add_field(name='Name', value=guild.name)
e.add_field(name='ID', value=guild.id)
e.add_field(name='Shard ID', value=guild.shard_id or 'N/A')
e.add_field(name='Owner', value=f'{guild.owner} (ID: {guild.owner.id})')
bots = sum(m.bot for m in guild.members)
total = guild.member_count
online = sum(m.status is discord.Status.online for m in guild.members)
e.add_field(name='Members', value=str(total))
e.add_field(name='Bots', value=f'{bots} ({bots/total:.2%})')
e.add_field(name='Online', value=f'{online} ({online/total:.2%})')
if guild.icon:
e.set_thumbnail(url=guild.icon_url)
if guild.me:
e.timestamp = guild.me.joined_at
await self.webhook.send(embed=e)
@stats_today.before_invoke
@stats_global.before_invoke
async def before_stats_invoke(self, ctx):
await ctx.trigger_typing()
@commands.Cog.listener()
async def on_guild_join(self, guild):
e = discord.Embed(colour=0x53dda4, title='New Guild') # green colour
await self.send_guild_stats(e, guild)
@commands.Cog.listener()
async def on_guild_remove(self, guild):
e = discord.Embed(colour=0xdd5f53, title='Left Guild') # red colour
await self.send_guild_stats(e, guild)
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
await self.register_command(ctx)
if not isinstance(error, (commands.CommandInvokeError, commands.ConversionError)):
return
error = error.original
if isinstance(error, (discord.Forbidden, discord.NotFound, CannotPaginate)):
return
e = discord.Embed(title='Command Error', colour=0xcc3366)
e.add_field(name='Name', value=ctx.command.qualified_name)
e.add_field(name='Author', value=f'{ctx.author} (ID: {ctx.author.id})')
fmt = f'Channel: {ctx.channel} (ID: {ctx.channel.id})'
if ctx.guild:
fmt = f'{fmt}\nGuild: {ctx.guild} (ID: {ctx.guild.id})'
e.add_field(name='Location', value=fmt, inline=False)
e.add_field(name='Content', value=textwrap.shorten(ctx.message.content, width=512))
exc = ''.join(traceback.format_exception(type(error), error, error.__traceback__, chain=False))
e.description = f'```py\n{exc}\n```'
e.timestamp = datetime.datetime.utcnow()
await self.webhook.send(embed=e)
@commands.Cog.listener()
async def on_socket_raw_send(self, data):
# kind of weird way to check if we're sending
# IDENTIFY or RESUME
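        # (gateway opcode 2 is IDENTIFY, opcode 6 is RESUME)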
if '"op":2' not in data and '"op":6' not in data:
return
back_to_json = json.loads(data)
if back_to_json['op'] == 2:
payload = back_to_json['d']
inner_shard = payload.get('shard', [0])
self._identifies[inner_shard[0]].append(datetime.datetime.utcnow())
else:
self._resumes.append(datetime.datetime.utcnow())
# don't want to permanently grow memory
self._clear_gateway_data()
def add_record(self, record):
# if self.bot.config.debug:
# return
self._gateway_queue.put_nowait(record)
async def notify_gateway_status(self, record):
attributes = {
'INFO': '\N{INFORMATION SOURCE}',
'WARNING': '\N{WARNING SIGN}'
}
emoji = attributes.get(record.levelname, '\N{CROSS MARK}')
dt = datetime.datetime.utcfromtimestamp(record.created)
msg = f'{emoji} `[{dt:%Y-%m-%d %H:%M:%S}] {record.message}`'
await self.webhook.send(msg, username='Gateway', avatar_url='https://i.imgur.com/4PnCKB3.png')
@commands.command(hidden=True)
@commands.is_owner()
async def bothealth(self, ctx):
"""Various bot health monitoring tools."""
# This uses a lot of private methods because there is no
# clean way of doing this otherwise.
HEALTHY = discord.Colour(value=0x43B581)
UNHEALTHY = discord.Colour(value=0xF04947)
WARNING = discord.Colour(value=0xF09E47)
total_warnings = 0
embed = discord.Embed(title='Bot Health Report', colour=HEALTHY)
# Check the connection pool health.
pool = self.bot.pool
total_waiting = len(pool._queue._getters)
current_generation = pool._generation
description = [
f'Total `Pool.acquire` Waiters: {total_waiting}',
f'Current Pool Generation: {current_generation}',
f'Connections In Use: {len(pool._holders) - pool._queue.qsize()}'
]
questionable_connections = 0
connection_value = []
for index, holder in enumerate(pool._holders, start=1):
generation = holder._generation
in_use = holder._in_use is not None
is_closed = holder._con is None or holder._con.is_closed()
display = f'gen={holder._generation} in_use={in_use} closed={is_closed}'
questionable_connections += any((in_use, generation != current_generation))
connection_value.append(f'<Holder i={index} {display}>')
joined_value = '\n'.join(connection_value)
embed.add_field(name='Connections', value=f'```py\n{joined_value}\n```', inline=False)
spam_control = self.bot.spam_control
being_spammed = [
str(key) for key, value in spam_control._cache.items()
if value._tokens == 0
]
description.append(f'Current Spammers: {", ".join(being_spammed) if being_spammed else "None"}')
description.append(f'Questionable Connections: {questionable_connections}')
total_warnings += questionable_connections
if being_spammed:
embed.colour = WARNING
total_warnings += 1
        try:
            task_retriever = asyncio.Task.all_tasks
        except AttributeError:
            # future proofing for 3.9 I guess
            task_retriever = asyncio.all_tasks
        # run the retriever outside the try/except so all_tasks is always
        # bound, even when the AttributeError fallback path is taken
        all_tasks = task_retriever(loop=self.bot.loop)
event_tasks = [
t for t in all_tasks
if 'Client._run_event' in repr(t) and not t.done()
]
cogs_directory = os.path.dirname(__file__)
tasks_directory = os.path.join('discord', 'ext', 'tasks', '__init__.py')
inner_tasks = [
t for t in all_tasks
if cogs_directory in repr(t) or tasks_directory in repr(t)
]
bad_inner_tasks = ", ".join(hex(id(t)) for t in inner_tasks if t.done() and t._exception is not None)
total_warnings += bool(bad_inner_tasks)
embed.add_field(name='Inner Tasks', value=f'Total: {len(inner_tasks)}\nFailed: {bad_inner_tasks or "None"}')
embed.add_field(name='Events Waiting', value=f'Total: {len(event_tasks)}', inline=False)
command_waiters = len(self._data_batch)
is_locked = self._batch_lock.locked()
description.append(f'Commands Waiting: {command_waiters}, Batch Locked: {is_locked}')
# RESUME/IDENTIFY data
yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
total_resumes = sum(1 for dt in self._resumes if dt > yesterday)
identifies = {
shard_id: sum(1 for dt in dates if dt > yesterday)
for shard_id, dates in self._identifies.items()
}
absolute_total_identifies = sum(identifies.values())
resume_info_builder = [
f'Total RESUMEs: {total_resumes}',
f'Total IDENTIFYs: {absolute_total_identifies}'
]
for shard_id, total in identifies.items():
resume_info_builder.append(f'Shard ID {shard_id} IDENTIFYs: {total}')
if absolute_total_identifies >= (len(self.bot.shards) * 5):
total_warnings += 1
embed.colour = WARNING
embed.add_field(name='Gateway (last 24 hours)', value='\n'.join(resume_info_builder), inline=False)
memory_usage = self.process.memory_full_info().uss / 1024**2
cpu_usage = self.process.cpu_percent() / psutil.cpu_count()
embed.add_field(name='Process', value=f'{memory_usage:.2f} MiB\n{cpu_usage:.2f}% CPU', inline=False)
global_rate_limit = not self.bot.http._global_over.is_set()
description.append(f'Global Rate Limit: {global_rate_limit}')
if command_waiters >= 8:
total_warnings += 1
embed.colour = WARNING
if global_rate_limit or total_warnings >= 9:
embed.colour = UNHEALTHY
embed.set_footer(text=f'{total_warnings} warning(s)')
embed.description = '\n'.join(description)
await ctx.send(embed=embed)
@commands.command(hidden=True, aliases=['cancel_task'])
@commands.is_owner()
async def debug_task(self, ctx, memory_id: hex_value):
"""Debug a task by a memory location."""
task = object_at(memory_id)
if task is None or not isinstance(task, asyncio.Task):
return await ctx.send(f'Could not find Task object at {hex(memory_id)}.')
if ctx.invoked_with == 'cancel_task':
task.cancel()
return await ctx.send(f'Cancelled task object {task!r}.')
paginator = commands.Paginator(prefix='```py')
fp = io.StringIO()
frames = len(task.get_stack())
paginator.add_line(f'# Total Frames: {frames}')
task.print_stack(file=fp)
for line in fp.getvalue().splitlines():
paginator.add_line(line)
for page in paginator.pages:
await ctx.send(page)
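    # hex_value and object_at are small helpers defined earlier in this module
    # (not shown in this excerpt): hex_value parses the argument as a hex
    # string, and object_at presumably looks up a live object by its id().
    # Example invocation (address shown purely for illustration):
    #     debug_task 0x7f9c2c0b1d30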
async def tabulate_query(self, ctx, query, *args):
records = await ctx.db.fetch(query, *args)
if len(records) == 0:
return await ctx.send('No results found.')
headers = list(records[0].keys())
table = formats.TabularData()
table.set_columns(headers)
table.add_rows(list(r.values()) for r in records)
render = table.render()
fmt = f'```\n{render}\n```'
if len(fmt) > 2000:
fp = io.BytesIO(fmt.encode('utf-8'))
await ctx.send('Too many results...', file=discord.File(fp, 'results.txt'))
else:
await ctx.send(fmt)
@commands.group(hidden=True, invoke_without_command=True)
@commands.is_owner()
async def command_history(self, ctx):
"""Command history."""
query = """SELECT
CASE failed
WHEN TRUE THEN command || ' [!]'
ELSE command
END AS "command",
to_char(used, 'Mon DD HH12:MI:SS AM') AS "invoked",
author_id,
guild_id
FROM commands
ORDER BY used DESC
LIMIT 15;
"""
await self.tabulate_query(ctx, query)
@command_history.command(name='for')
@commands.is_owner()
async def command_history_for(self, ctx, days: typing.Optional[int] = 7, *, command: str):
"""Command history for a command."""
query = """SELECT *, t.success + t.failed AS "total"
FROM (
SELECT guild_id,
SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
FROM commands
WHERE command=$1
AND used > (CURRENT_TIMESTAMP - $2::interval)
GROUP BY guild_id
) AS t
ORDER BY "total" DESC
LIMIT 30;
"""
await self.tabulate_query(ctx, query, command, datetime.timedelta(days=days))
@command_history.command(name='guild', aliases=['server'])
@commands.is_owner()
async def command_history_guild(self, ctx, guild_id: int):
"""Command history for a guild."""
query = """SELECT
CASE failed
WHEN TRUE THEN command || ' [!]'
ELSE command
END AS "command",
channel_id,
author_id,
used
FROM commands
WHERE guild_id=$1
ORDER BY used DESC
LIMIT 15;
"""
await self.tabulate_query(ctx, query, guild_id)
@command_history.command(name='user', aliases=['member'])
@commands.is_owner()
async def command_history_user(self, ctx, user_id: int):
"""Command history for a user."""
query = """SELECT
CASE failed
WHEN TRUE THEN command || ' [!]'
ELSE command
END AS "command",
guild_id,
used
FROM commands
WHERE author_id=$1
ORDER BY used DESC
LIMIT 20;
"""
await self.tabulate_query(ctx, query, user_id)
@command_history.command(name='log')
@commands.is_owner()
async def command_history_log(self, ctx, days=7):
"""Command history log for the last N days."""
query = """SELECT command, COUNT(*)
FROM commands
WHERE used > (CURRENT_TIMESTAMP - $1::interval)
GROUP BY command
ORDER BY 2 DESC
"""
all_commands = {
c.qualified_name: 0
for c in self.bot.walk_commands()
}
records = await ctx.db.fetch(query, datetime.timedelta(days=days))
for name, uses in records:
if name in all_commands:
all_commands[name] = uses
as_data = sorted(all_commands.items(), key=lambda t: t[1], reverse=True)
table = formats.TabularData()
table.set_columns(['Command', 'Uses'])
table.add_rows(tup for tup in as_data)
render = table.render()
embed = discord.Embed(title='Summary', colour=discord.Colour.green())
embed.set_footer(text='Since').timestamp = datetime.datetime.utcnow() - datetime.timedelta(days=days)
top_ten = '\n'.join(f'{command}: {uses}' for command, uses in records[:10])
bottom_ten = '\n'.join(f'{command}: {uses}' for command, uses in records[-10:])
embed.add_field(name='Top 10', value=top_ten)
embed.add_field(name='Bottom 10', value=bottom_ten)
unused = ', '.join(name for name, uses in as_data if uses == 0)
if len(unused) > 1024:
unused = 'Way too many...'
embed.add_field(name='Unused', value=unused, inline=False)
await ctx.send(embed=embed, file=discord.File(io.BytesIO(render.encode()), filename='full_results.txt'))
@command_history.command(name='cog')
@commands.is_owner()
async def command_history_cog(self, ctx, days: typing.Optional[int] = 7, *, cog: str = None):
"""Command history for a cog or grouped by a cog."""
interval = datetime.timedelta(days=days)
if cog is not None:
cog = self.bot.get_cog(cog)
if cog is None:
return await ctx.send(f'Unknown cog: {cog}')
query = """SELECT *, t.success + t.failed AS "total"
FROM (
SELECT command,
SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
FROM commands
WHERE command = any($1::text[])
AND used > (CURRENT_TIMESTAMP - $2::interval)
GROUP BY command
) AS t
ORDER BY "total" DESC
LIMIT 30;
"""
return await self.tabulate_query(ctx, query, [c.qualified_name for c in cog.walk_commands()], interval)
# A more manual query with a manual grouper.
query = """SELECT *, t.success + t.failed AS "total"
FROM (
SELECT command,
SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - $1::interval)
GROUP BY command
) AS t;
"""
class Count:
__slots__ = ('success', 'failed', 'total')
def __init__(self):
self.success = 0
self.failed = 0
self.total = 0
def add(self, record):
self.success += record['success']
self.failed += record['failed']
self.total += record['total']
data = defaultdict(Count)
records = await ctx.db.fetch(query, interval)
for record in records:
command = self.bot.get_command(record['command'])
if command is None or command.cog is None:
data['No Cog'].add(record)
else:
data[command.cog.qualified_name].add(record)
table = formats.TabularData()
table.set_columns(['Cog', 'Success', 'Failed', 'Total'])
data = sorted([
(cog, e.success, e.failed, e.total)
for cog, e in data.items()
], key=lambda t: t[-1], reverse=True)
table.add_rows(data)
render = table.render()
await ctx.safe_send(f'```\n{render}\n```')
old_on_error = commands.AutoShardedBot.on_error
async def on_error(self, event, *args, **kwargs):
e = discord.Embed(title='Event Error', colour=0xa32952)
e.add_field(name='Event', value=event)
e.description = f'```py\n{traceback.format_exc()}\n```'
e.timestamp = datetime.datetime.utcnow()
args_str = ['```py']
for index, arg in enumerate(args):
args_str.append(f'[{index}]: {arg!r}')
args_str.append('```')
e.add_field(name='Args', value='\n'.join(args_str), inline=False)
hook = self.get_cog('Stats').webhook
try:
await hook.send(embed=e)
except:
pass
def setup(bot):
if not hasattr(bot, 'command_stats'):
bot.command_stats = Counter()
if not hasattr(bot, 'socket_stats'):
bot.socket_stats = Counter()
cog = Stats(bot)
bot.add_cog(cog)
bot._stats_cog_gateway_handler = handler = GatewayHandler(cog)
logging.getLogger().addHandler(handler)
commands.AutoShardedBot.on_error = on_error
def teardown(bot):
commands.AutoShardedBot.on_error = old_on_error
logging.getLogger().removeHandler(bot._stats_cog_gateway_handler)
del bot._stats_cog_gateway_handler
|
prepare
|
Create a 1-node cluster, start it, create a keyspace, and if
<table_name>, create a table in that keyspace. If <cdc_enabled_table>,
that table is created with CDC enabled. If <column_spec>, use that
string to specify the schema of the table -- for example, a valid value
is 'a int PRIMARY KEY, b int'. The <configuration_overrides> is
treated as a dict-like object and passed to
self.cluster.set_configuration_options.
|
from __future__ import division
import errno
import os
import re
import shutil
import time
import uuid
from collections import namedtuple
from itertools import repeat
from pprint import pformat
import pytest
from cassandra import WriteFailure
from cassandra.concurrent import (execute_concurrent,
execute_concurrent_with_args)
from ccmlib.node import Node
from cqlsh_tests.cqlsh_tools import assert_resultset_contains
from dtest import Tester, create_ks, logger
from tools.assertions import assert_length_equal
from tools.data import rows_to_list
from tools.files import size_of_files_in_dir
from tools.funcutils import get_rate_limited_function
from tools.hacks import advance_to_next_cl_segment
since = pytest.mark.since
_16_uuid_column_spec = (
'a uuid PRIMARY KEY, b uuid, c uuid, d uuid, e uuid, f uuid, g uuid, '
'h uuid, i uuid, j uuid, k uuid, l uuid, m uuid, n uuid, o uuid, '
'p uuid'
)
def _insert_rows(session, table_name, insert_stmt, values):
prepared_insert = session.prepare(insert_stmt)
values = list(values) # in case values is a generator
execute_concurrent(session, ((prepared_insert, x) for x in values),
concurrency=500, raise_on_first_error=True)
data_loaded = rows_to_list(session.execute('SELECT * FROM ' + table_name))
logger.debug('{n} rows inserted into {table_name}'.format(n=len(data_loaded), table_name=table_name))
# use assert_equal over assert_length_equal to avoid printing out
# potentially large lists
assert len(values) == len(data_loaded)
return data_loaded
def _move_commitlog_segments(source_dir, dest_dir, verbose=True):
for source_filename in [name for name in os.listdir(source_dir) if not name.endswith('_cdc.idx')]:
source_path, dest_path = (os.path.join(source_dir, source_filename),
os.path.join(dest_dir, source_filename))
if verbose:
logger.debug('moving {} to {}'.format(source_path, dest_path))
shutil.move(source_path, dest_path)
def _get_16_uuid_insert_stmt(ks_name, table_name):
return (
'INSERT INTO {ks_name}.{table_name} '
'(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) '
'VALUES (uuid(), uuid(), uuid(), uuid(), uuid(), '
'uuid(), uuid(), uuid(), uuid(), uuid(), uuid(), '
'uuid(), uuid(), uuid(), uuid(), uuid())'
).format(ks_name=ks_name, table_name=table_name)
def _get_create_table_statement(ks_name, table_name, column_spec, options=None):
if options:
options_pairs = ('{k}={v}'.format(k=k, v=v) for (k, v) in options.items())
options_string = 'WITH ' + ' AND '.join(options_pairs)
else:
options_string = ''
return (
'CREATE TABLE ' + ks_name + '.' + table_name + ' '
'(' + column_spec + ') ' + options_string
)
def _write_to_cdc_write_failure(session, insert_stmt):
prepared = session.prepare(insert_stmt)
start, rows_loaded, error_found = time.time(), 0, False
rate_limited_debug = get_rate_limited_function(logger.debug, 5)
while not error_found:
# We want to fail if inserting data takes too long. Locally this
# takes about 10s, but let's be generous.
assert ((time.time() - start) <= 600), (
"It's taken more than 10 minutes to reach a WriteFailure trying "
'to overrun the space designated for CDC commitlogs. This could '
"be because data isn't being written quickly enough in this "
'environment, or because C* is failing to reject writes when '
'it should.')
# If we haven't logged from here in the last 5s, do so.
rate_limited_debug(
' data load step has lasted {s:.2f}s, '
'loaded {r} rows'.format(s=(time.time() - start), r=rows_loaded))
batch_results = list(execute_concurrent(
session,
((prepared, ()) for _ in range(1000)),
concurrency=500,
# Don't propagate errors to the main thread. We expect at least
# one WriteFailure, so we handle it below as part of the
            # results received from this method.
raise_on_first_error=False
))
# Here, we track the number of inserted values by getting the
# number of successfully completed statements...
rows_loaded += len([br for br in batch_results if br[0]])
# then, we make sure that the only failures are the expected
# WriteFailure.
assert ([] == [result for (success, result) in batch_results
if not success and not isinstance(result, WriteFailure)])
# Finally, if we find a WriteFailure, that means we've inserted all
# the CDC data we can and so we flip error_found to exit the loop.
if any(isinstance(result, WriteFailure) for (_, result) in batch_results):
logger.debug("write failed (presumably because we've overrun "
'designated CDC commitlog space) after '
'loading {r} rows in {s:.2f}s'.format(
r=rows_loaded,
s=time.time() - start))
error_found = True
return rows_loaded
_TableInfoNamedtuple = namedtuple('TableInfoNamedtuple', [
# required
'ks_name', 'table_name', 'column_spec',
# optional
'options', 'insert_stmt',
# derived
'name', 'create_stmt'
])
class TableInfo(_TableInfoNamedtuple):
__slots__ = ()
def __new__(cls, ks_name, table_name, column_spec, options=None, insert_stmt=None):
name = ks_name + '.' + table_name
create_stmt = _get_create_table_statement(ks_name, table_name, column_spec, options)
self = super(TableInfo, cls).__new__(
cls,
# required
ks_name=ks_name, table_name=table_name, column_spec=column_spec,
# optional
options=options, insert_stmt=insert_stmt,
# derived
name=name, create_stmt=create_stmt
)
return self
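# Illustrative TableInfo usage (a sketch mirroring the tests below; the values
# are examples, not fixtures from this module):
#     info = TableInfo(ks_name='ks', table_name='tab',
#                      column_spec='a int PRIMARY KEY, b int',
#                      options={'cdc': 'true'})
#     info.name        -> 'ks.tab'
#     info.create_stmt -> 'CREATE TABLE ks.tab (a int PRIMARY KEY, b int) WITH cdc=true'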
def _set_cdc_on_table(session, table_name, value, ks_name=None):
"""
Uses <session> to set CDC to <value> on <ks_name>.<table_name>.
"""
table_string = ks_name + '.' + table_name if ks_name else table_name
value_string = 'true' if value else 'false'
stmt = 'ALTER TABLE ' + table_string + ' WITH CDC = ' + value_string
logger.debug(stmt)
session.execute(stmt)
def _get_set_cdc_func(session, ks_name, table_name):
"""
Close over a session, keyspace name, and table name and return a function
    that enables CDC on that table if its argument is truthy and otherwise
    disables it.
"""
def set_cdc(value):
return _set_cdc_on_table(
session=session,
ks_name=ks_name, table_name=table_name,
value=value
)
return set_cdc
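# Illustrative usage of the closure returned above (a sketch, not part of the
# original test flow):
#     set_cdc = _get_set_cdc_func(session, 'ks', 'tab')
#     set_cdc(True)    # issues: ALTER TABLE ks.tab WITH CDC = true
#     set_cdc(False)   # issues: ALTER TABLE ks.tab WITH CDC = false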
def _get_commitlog_files(node_path):
commitlog_dir = os.path.join(node_path, 'commitlogs')
return {
os.path.join(commitlog_dir, name)
for name in os.listdir(commitlog_dir)
}
def _get_cdc_raw_files(node_path, cdc_raw_dir_name='cdc_raw'):
commitlog_dir = os.path.join(node_path, cdc_raw_dir_name)
return {
os.path.join(commitlog_dir, name)
for name in os.listdir(commitlog_dir)
}
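# Both helpers above assume ccm's on-disk node layout: commitlog segments live
# under <node_path>/commitlogs, and CDC output lands under <node_path>/cdc_raw
# as *.log segments plus their *_cdc.idx index files (directory names are the
# defaults used throughout this module).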
@since('3.8')
class TestCDC(Tester):
"""
@jira_ticket CASSANDRA-8844
@jira_ticket CASSANDRA-12148
Test the correctness of some features of CDC, Change Data Capture, which
provides a view of the commitlog on tables for which it is enabled.
"""
@pytest.fixture(autouse=True)
def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
fixture_dtest_setup.allow_log_errors = True
fixture_dtest_setup.ignore_log_patterns = (
# We expect to see this error in the logs when we reach CDC limit
r'Failed to apply mutation locally'
)
def _create_temp_dir(self, dir_name, verbose=True):
"""
Create a directory that will be deleted when this test class is torn
down.
"""
if verbose:
logger.debug('creating ' + dir_name)
try:
os.mkdir(dir_name)
except OSError as e:
            if e.errno == errno.EEXIST:
logger.debug(dir_name + ' already exists. removing and recreating.')
shutil.rmtree(dir_name)
os.mkdir(dir_name)
else:
raise e
def debug_and_rmtree():
shutil.rmtree(dir_name)
logger.debug(dir_name + ' removed')
self.addCleanup(debug_and_rmtree)
# MASKED: prepare function (lines 244-290)
def _assert_cdc_data_readable_on_round_trip(self, start_with_cdc_enabled):
"""
Parameterized test asserting that data written to a table is still
readable after flipping the CDC flag on that table, then flipping it
again. Starts with CDC enabled if start_with_cdc_enabled, otherwise
starts with it disabled.
"""
ks_name, table_name = 'ks', 'tab'
sequence = [True, False, True] if start_with_cdc_enabled else [False, True, False]
start_enabled, alter_path = sequence[0], list(sequence[1:])
node, session = self.prepare(ks_name=ks_name, table_name=table_name,
cdc_enabled_table=start_enabled,
column_spec='a int PRIMARY KEY, b int')
set_cdc = _get_set_cdc_func(session=session, ks_name=ks_name, table_name=table_name)
insert_stmt = session.prepare('INSERT INTO ' + table_name + ' (a, b) VALUES (?, ?)')
# data = zip(list(range(1000)), list(range(1000)))
start = 0
stop = 1000
step = 1
data = [(n, min(n+step, stop)) for n in range(start, stop, step)]
execute_concurrent_with_args(session, insert_stmt, data)
# We need data to be in commitlogs, not sstables.
assert [] == list(node.get_sstables(ks_name, table_name))
for enable in alter_path:
set_cdc(enable)
assert_resultset_contains(session.execute('SELECT * FROM ' + table_name), data)
def test_cdc_enabled_data_readable_on_round_trip(self):
"""
Test that data is readable after an enabled->disabled->enabled round
trip.
"""
self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=True)
def test_cdc_disabled_data_readable_on_round_trip(self):
"""
        Test that data is readable after a disabled->enabled->disabled round
trip.
"""
self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=False)
def test_non_cdc_segments_deleted_after_replay(self):
"""
Test that non-cdc segment files generated in previous runs are deleted
after replay.
"""
ks_name, table_name = 'ks', 'tab'
node, session = self.prepare(ks_name=ks_name, table_name=table_name,
cdc_enabled_table=True,
column_spec='a int PRIMARY KEY, b int')
old_files = _get_cdc_raw_files(node.get_path())
node.drain()
session.cluster.shutdown()
node.stop()
node.start(wait_for_binary_proto=True)
new_files = _get_cdc_raw_files(node.get_path())
assert len(old_files.intersection(new_files)) == 0
def test_insertion_and_commitlog_behavior_after_reaching_cdc_total_space(self):
"""
Test that C* behaves correctly when CDC tables have consumed all the
space available to them. In particular: after writing
cdc_total_space_in_mb MB into CDC commitlogs:
- CDC writes are rejected
- non-CDC writes are accepted
- on flush, CDC commitlogs are copied to cdc_raw
- on flush, non-CDC commitlogs are not copied to cdc_raw
This is a lot of behavior to validate in one test, but we do so to
        avoid running multiple tests that each have to write enough data to
        fill cdc_total_space_in_mb.
"""
ks_name = 'ks'
full_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='full_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'full_cdc_tab'),
options={'cdc': 'true'}
)
configuration_overrides = {
# Make CDC space as small as possible so we can fill it quickly.
'cdc_total_space_in_mb': 4,
}
node, session = self.prepare(
ks_name=ks_name,
configuration_overrides=configuration_overrides
)
session.execute(full_cdc_table_info.create_stmt)
# Later, we'll also make assertions about the behavior of non-CDC
# tables, so we create one here.
non_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='non_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'non_cdc_tab')
)
session.execute(non_cdc_table_info.create_stmt)
# We'll also make assertions about the behavior of CDC tables when
# other CDC tables have already filled the designated space for CDC
# commitlogs, so we create the second CDC table here.
empty_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='empty_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'empty_cdc_tab'),
options={'cdc': 'true'}
)
session.execute(empty_cdc_table_info.create_stmt)
        # Here, we insert values into the first CDC table until we get a
        # WriteFailure. This should happen once the CDC commitlogs take up
        # cdc_total_space_in_mb (4 MB in this test) or more.
logger.debug('flushing non-CDC commitlogs')
node.flush()
# Then, we insert rows into the CDC table until we can't anymore.
logger.debug('beginning data insert to fill CDC commitlogs')
rows_loaded = _write_to_cdc_write_failure(session, full_cdc_table_info.insert_stmt)
assert 0 < rows_loaded, ('No CDC rows inserted. This may happen when '
'cdc_total_space_in_mb > commitlog_segment_size_in_mb')
commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
commitlogs_size = size_of_files_in_dir(commitlog_dir)
logger.debug('Commitlog dir ({d}) is {b}B'.format(d=commitlog_dir, b=commitlogs_size))
# We should get a WriteFailure when trying to write to the CDC table
# that's filled the designated CDC space...
try:
session.execute(full_cdc_table_info.insert_stmt)
raise Exception("WriteFailure expected")
except WriteFailure:
pass
# or any CDC table.
try:
session.execute(empty_cdc_table_info.insert_stmt)
raise Exception("WriteFailure expected")
except WriteFailure:
pass
# Now we test for behaviors of non-CDC tables when we've exceeded
# cdc_total_space_in_mb.
#
# First, we drain and save the names of all the new discarded CDC
# segments
node.drain()
session.cluster.shutdown()
node.stop()
node.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node)
pre_non_cdc_write_cdc_raw_segments = _get_cdc_raw_files(node.get_path())
# Snapshot the _cdc.idx file if > 4.0 for comparison at end
before_cdc_state = [] # init empty here to quiet PEP
if self.cluster.version() >= '4.0':
# Create ReplayData objects for each index file found in loading cluster
node1_path = os.path.join(node.get_path(), 'cdc_raw')
before_cdc_state = [ReplayData.load(node1_path, name)
for name in os.listdir(node1_path) if name.endswith('_cdc.idx')]
# save the names of all the commitlog segments written up to this
# point:
pre_non_cdc_write_segments = _get_commitlog_files(node.get_path())
# Check that writing to non-CDC tables succeeds even when writes to CDC
# tables are rejected:
non_cdc_prepared_insert = session.prepare(non_cdc_table_info.insert_stmt)
session.execute(non_cdc_prepared_insert, ()) # should not raise an exception
# Check the following property: any new commitlog segments written to
# after cdc_raw has reached its maximum configured size should not be
# moved to cdc_raw, on commitlog discard, because any such commitlog
# segments are written to non-CDC tables.
#
# First, write to non-cdc tables.
start, time_limit = time.time(), 600
rate_limited_debug = get_rate_limited_function(logger.debug, 5)
logger.debug('writing to non-cdc table')
# We write until we get a new commitlog segment.
while _get_commitlog_files(node.get_path()) <= pre_non_cdc_write_segments:
elapsed = time.time() - start
rate_limited_debug(' non-cdc load step has lasted {s:.2f}s'.format(s=elapsed))
            assert elapsed <= time_limit, "It's been over {s}s and we haven't written a new " \
"commitlog segment. Something is wrong.".format(s=time_limit)
execute_concurrent(
session,
((non_cdc_prepared_insert, ()) for _ in range(1000)),
concurrency=500,
raise_on_first_error=True,
)
# Finally, we check that draining doesn't move any new segments to cdc_raw:
node.drain()
session.cluster.shutdown()
if self.cluster.version() < '4.0':
assert pre_non_cdc_write_cdc_raw_segments == _get_cdc_raw_files(node.get_path())
else:
# Create ReplayData objects for each index file found in loading cluster
node2_path = os.path.join(node.get_path(), 'cdc_raw')
after_cdc_state = [ReplayData.load(node2_path, name)
for name in os.listdir(node2_path) if name.endswith('_cdc.idx')]
# Confirm all indexes in 1st are accounted for and match corresponding entry in 2nd.
found = True
for idx in before_cdc_state:
idx_found = False
for idx_two in after_cdc_state:
if compare_replay_data(idx, idx_two):
idx_found = True
if not idx_found:
found = False
break
if not found:
self._fail_and_print_sets(before_cdc_state, after_cdc_state,
'Found CDC index in before not matched in after (non-CDC write test)')
# Now we confirm we don't have anything that showed up in 2nd not accounted for in 1st
orphan_found = False
for idx_two in after_cdc_state:
index_found = False
for idx in before_cdc_state:
if compare_replay_data(idx_two, idx):
index_found = True
if not index_found:
orphan_found = True
break
if orphan_found:
self._fail_and_print_sets(before_cdc_state, after_cdc_state,
'Found orphaned index file in after CDC state not in former.')
def _fail_and_print_sets(self, rd_one, rd_two, msg):
print('Set One:')
for idx in rd_one:
print(' {},{},{},{}'.format(idx.name, idx.completed, idx.offset, idx.log_name))
print('Set Two:')
for idx_two in rd_two:
print(' {},{},{},{}'.format(idx_two.name, idx_two.completed, idx_two.offset, idx_two.log_name))
pytest.fail(msg)
def _init_new_loading_node(self, ks_name, create_stmt, use_thrift=False):
loading_node = Node(
name='node2',
cluster=self.cluster,
auto_bootstrap=False,
thrift_interface=('127.0.0.2', 9160) if use_thrift else None,
storage_interface=('127.0.0.2', 7000),
jmx_port='7400',
remote_debug_port='0',
initial_token=None,
binary_interface=('127.0.0.2', 9042)
)
logger.debug('adding node')
self.cluster.add(loading_node, is_seed=True, data_center="dc1")
logger.debug('starting new node')
loading_node.start(wait_for_binary_proto=120)
logger.debug('recreating ks and table')
loading_session = self.patient_exclusive_cql_connection(loading_node)
create_ks(loading_session, ks_name, rf=1)
logger.debug('creating new table')
loading_session.execute(create_stmt)
logger.debug('stopping new node')
loading_session.cluster.shutdown()
loading_node.stop()
return loading_node
def test_cdc_data_available_in_cdc_raw(self):
ks_name = 'ks'
# First, create a new node just for data generation.
generation_node, generation_session = self.prepare(ks_name=ks_name)
cdc_table_info = TableInfo(
ks_name=ks_name, table_name='cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'cdc_tab'),
options={
'cdc': 'true',
# give table an explicit id so when we create it again it's the
# same table and we can replay into it
'id': uuid.uuid4()
}
)
# Write until we get a new CL segment to avoid replaying initialization
# mutations from this node's startup into system tables in the other
# node. See CASSANDRA-11811.
advance_to_next_cl_segment(
session=generation_session,
commitlog_dir=os.path.join(generation_node.get_path(), 'commitlogs')
)
generation_session.execute(cdc_table_info.create_stmt)
# insert 10000 rows
inserted_rows = _insert_rows(generation_session, cdc_table_info.name, cdc_table_info.insert_stmt,
repeat((), 10000))
# drain the node to guarantee all cl segments will be recycled
logger.debug('draining')
generation_node.drain()
logger.debug('stopping')
# stop the node and clean up all sessions attached to it
generation_session.cluster.shutdown()
generation_node.stop()
# We can rely on the existing _cdc.idx files to determine which .log files contain cdc data.
source_path = os.path.join(generation_node.get_path(), 'cdc_raw')
        source_cdc_indexes = {ReplayData.load(source_path, name)
                              for name in os.listdir(source_path) if name.endswith('_cdc.idx')}
        # assertNotEqual(source_cdc_indexes, set())
        assert source_cdc_indexes != set()
# create a new node to use for cdc_raw cl segment replay
loading_node = self._init_new_loading_node(ks_name, cdc_table_info.create_stmt, self.cluster.version() < '4')
# move cdc_raw contents to commitlog directories, then start the
# node again to trigger commitlog replay, which should replay the
# cdc_raw files we moved to commitlogs into memtables.
logger.debug('moving cdc_raw and restarting node')
_move_commitlog_segments(
os.path.join(generation_node.get_path(), 'cdc_raw'),
os.path.join(loading_node.get_path(), 'commitlogs')
)
loading_node.start(wait_for_binary_proto=120)
logger.debug('node successfully started; waiting on log replay')
loading_node.grep_log('Log replay complete')
logger.debug('log replay complete')
# final assertions
validation_session = self.patient_exclusive_cql_connection(loading_node)
data_in_cdc_table_after_restart = rows_to_list(
validation_session.execute('SELECT * FROM ' + cdc_table_info.name)
)
logger.debug('found {cdc} values in CDC table'.format(
cdc=len(data_in_cdc_table_after_restart)
))
# Then we assert that the CDC data that we expect to be there is there.
# All data that was in CDC tables should have been copied to cdc_raw,
# then used in commitlog replay, so it should be back in the cluster.
assert (inserted_rows == data_in_cdc_table_after_restart), 'not all expected data selected'
if self.cluster.version() >= '4.0':
# Create ReplayData objects for each index file found in loading cluster
loading_path = os.path.join(loading_node.get_path(), 'cdc_raw')
dest_cdc_indexes = [ReplayData.load(loading_path, name)
for name in os.listdir(loading_path) if name.endswith('_cdc.idx')]
# Compare source replay data to dest to ensure replay process created both hard links and index files.
for srd in source_cdc_indexes:
# Confirm both log and index are in dest
assert os.path.isfile(os.path.join(loading_path, srd.idx_name))
assert os.path.isfile(os.path.join(loading_path, srd.log_name))
# Find dest ReplayData that corresponds to the source (should be exactly 1)
corresponding_dest_replay_datae = [x for x in dest_cdc_indexes
if srd.idx_name == x.idx_name]
assert_length_equal(corresponding_dest_replay_datae, 1)
drd = corresponding_dest_replay_datae[0]
# We can't compare equality on offsets since replay uses the raw file length as the written
# cdc offset. We *can*, however, confirm that the offset in the replayed file is >=
# the source file, ensuring clients are signaled to replay at least all the data in the
# log.
assert drd.offset >= srd.offset
# Confirm completed flag is the same in both
assert srd.completed == drd.completed
# Confirm that the relationship between index files on the source
# and destination looks like we expect.
# First, grab the mapping between the two, make sure it's a 1-1
# mapping, and transform the dict to reflect that:
src_to_dest_idx_map = {
src_rd: [dest_rd for dest_rd in dest_cdc_indexes
if dest_rd.idx_name == src_rd.idx_name]
for src_rd in source_cdc_indexes
}
for src_rd, dest_rds in src_to_dest_idx_map.items():
assert_length_equal(dest_rds, 1)
src_to_dest_idx_map[src_rd] = dest_rds[0]
# All offsets in idx files that were copied should be >0 on the
# destination node.
assert (
0 not in {i.offset for i in src_to_dest_idx_map.values()}),\
('Found index offsets == 0 in an index file on the '
'destination node that corresponds to an index file on the '
'source node:\n'
'{}').format(pformat(src_to_dest_idx_map))
# Offsets of all shared indexes should be >= on the destination
# than on the source.
for src_rd, dest_rd in src_to_dest_idx_map.items():
assert dest_rd.offset >= src_rd.offset
src_to_dest_idx_map = {
src_rd: [dest_rd for dest_rd in dest_cdc_indexes
if dest_rd.idx_name == src_rd.idx_name]
for src_rd in source_cdc_indexes
}
for k, v in src_to_dest_idx_map.items():
assert_length_equal(v, 1)
                assert v[0].offset >= k.offset
def compare_replay_data(rd_one, rd_two):
return rd_one.idx_name == rd_two.idx_name and \
rd_one.completed == rd_two.completed and \
rd_one.offset == rd_two.offset and \
rd_one.log_name == rd_two.log_name
class ReplayData(namedtuple('ReplayData', ['idx_name', 'completed', 'offset', 'log_name'])):
"""
Replay data class containing data from a _cdc.idx file. Build one with the load method.
"""
@classmethod
def load(cls, path, name):
assert '_cdc' in name, 'expected to find _cdc in passed in index name. Did not: ' + name
with open(os.path.join(path, name), 'r') as f:
offset, completed = [line.strip() for line in f.readlines()]
return cls(
idx_name=name,
completed=completed,
offset=int(offset),
log_name=re.sub('_cdc.idx', '.log', name)
)
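# For reference, ReplayData.load expects a two-line _cdc.idx file, e.g.
# (contents shown for illustration only; real values depend on the run):
#     1048576
#     COMPLETED
# i.e. the replay offset on the first line and a completion marker on the second.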
|
def prepare(self, ks_name,
table_name=None, cdc_enabled_table=None,
gc_grace_seconds=None,
column_spec=None,
configuration_overrides=None,
table_id=None):
"""
Create a 1-node cluster, start it, create a keyspace, and if
<table_name>, create a table in that keyspace. If <cdc_enabled_table>,
that table is created with CDC enabled. If <column_spec>, use that
string to specify the schema of the table -- for example, a valid value
is 'a int PRIMARY KEY, b int'. The <configuration_overrides> is
treated as a dict-like object and passed to
self.cluster.set_configuration_options.
"""
config_defaults = {
'cdc_enabled': True,
# we want to be able to generate new segments quickly
'commitlog_segment_size_in_mb': 2,
}
if configuration_overrides is None:
configuration_overrides = {}
self.cluster.populate(1)
self.cluster.set_configuration_options(dict(config_defaults, **configuration_overrides))
self.cluster.start()
node = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node)
create_ks(session, ks_name, rf=1)
if table_name is not None:
assert cdc_enabled_table is not None, 'if creating a table in prepare, must specify whether or not CDC is enabled on it'
assert column_spec is not None, 'if creating a table in prepare, must specify its schema'
options = {}
if gc_grace_seconds is not None:
options['gc_grace_seconds'] = gc_grace_seconds
if table_id is not None:
options['id'] = table_id
if cdc_enabled_table:
options['cdc'] = 'true'
stmt = _get_create_table_statement(
ks_name, table_name, column_spec,
options=options
)
logger.debug(stmt)
session.execute(stmt)
return node, session
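    # Typical call from a test in this class (the keyword values are just an
    # example schema, mirroring the round-trip tests above):
    #     node, session = self.prepare(ks_name='ks', table_name='tab',
    #                                  cdc_enabled_table=True,
    #                                  column_spec='a int PRIMARY KEY, b int')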
| 244 | 290 |
from __future__ import division
import errno
import os
import re
import shutil
import time
import uuid
from collections import namedtuple
from itertools import repeat
from pprint import pformat
import pytest
from cassandra import WriteFailure
from cassandra.concurrent import (execute_concurrent,
execute_concurrent_with_args)
from ccmlib.node import Node
from cqlsh_tests.cqlsh_tools import assert_resultset_contains
from dtest import Tester, create_ks, logger
from tools.assertions import assert_length_equal
from tools.data import rows_to_list
from tools.files import size_of_files_in_dir
from tools.funcutils import get_rate_limited_function
from tools.hacks import advance_to_next_cl_segment
since = pytest.mark.since
_16_uuid_column_spec = (
'a uuid PRIMARY KEY, b uuid, c uuid, d uuid, e uuid, f uuid, g uuid, '
'h uuid, i uuid, j uuid, k uuid, l uuid, m uuid, n uuid, o uuid, '
'p uuid'
)
def _insert_rows(session, table_name, insert_stmt, values):
prepared_insert = session.prepare(insert_stmt)
values = list(values) # in case values is a generator
execute_concurrent(session, ((prepared_insert, x) for x in values),
concurrency=500, raise_on_first_error=True)
data_loaded = rows_to_list(session.execute('SELECT * FROM ' + table_name))
logger.debug('{n} rows inserted into {table_name}'.format(n=len(data_loaded), table_name=table_name))
# use assert_equal over assert_length_equal to avoid printing out
# potentially large lists
assert len(values) == len(data_loaded)
return data_loaded
def _move_commitlog_segments(source_dir, dest_dir, verbose=True):
for source_filename in [name for name in os.listdir(source_dir) if not name.endswith('_cdc.idx')]:
source_path, dest_path = (os.path.join(source_dir, source_filename),
os.path.join(dest_dir, source_filename))
if verbose:
logger.debug('moving {} to {}'.format(source_path, dest_path))
shutil.move(source_path, dest_path)
def _get_16_uuid_insert_stmt(ks_name, table_name):
return (
'INSERT INTO {ks_name}.{table_name} '
'(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) '
'VALUES (uuid(), uuid(), uuid(), uuid(), uuid(), '
'uuid(), uuid(), uuid(), uuid(), uuid(), uuid(), '
'uuid(), uuid(), uuid(), uuid(), uuid())'
).format(ks_name=ks_name, table_name=table_name)
def _get_create_table_statement(ks_name, table_name, column_spec, options=None):
if options:
options_pairs = ('{k}={v}'.format(k=k, v=v) for (k, v) in options.items())
options_string = 'WITH ' + ' AND '.join(options_pairs)
else:
options_string = ''
return (
'CREATE TABLE ' + ks_name + '.' + table_name + ' '
'(' + column_spec + ') ' + options_string
)
def _write_to_cdc_write_failure(session, insert_stmt):
prepared = session.prepare(insert_stmt)
start, rows_loaded, error_found = time.time(), 0, False
rate_limited_debug = get_rate_limited_function(logger.debug, 5)
while not error_found:
# We want to fail if inserting data takes too long. Locally this
# takes about 10s, but let's be generous.
assert ((time.time() - start) <= 600), (
"It's taken more than 10 minutes to reach a WriteFailure trying "
'to overrun the space designated for CDC commitlogs. This could '
"be because data isn't being written quickly enough in this "
'environment, or because C* is failing to reject writes when '
'it should.')
# If we haven't logged from here in the last 5s, do so.
rate_limited_debug(
' data load step has lasted {s:.2f}s, '
'loaded {r} rows'.format(s=(time.time() - start), r=rows_loaded))
batch_results = list(execute_concurrent(
session,
((prepared, ()) for _ in range(1000)),
concurrency=500,
# Don't propagate errors to the main thread. We expect at least
# one WriteFailure, so we handle it below as part of the
# results recieved from this method.
raise_on_first_error=False
))
# Here, we track the number of inserted values by getting the
# number of successfully completed statements...
rows_loaded += len([br for br in batch_results if br[0]])
# then, we make sure that the only failures are the expected
# WriteFailure.
assert ([] == [result for (success, result) in batch_results
if not success and not isinstance(result, WriteFailure)])
# Finally, if we find a WriteFailure, that means we've inserted all
# the CDC data we can and so we flip error_found to exit the loop.
if any(isinstance(result, WriteFailure) for (_, result) in batch_results):
logger.debug("write failed (presumably because we've overrun "
'designated CDC commitlog space) after '
'loading {r} rows in {s:.2f}s'.format(
r=rows_loaded,
s=time.time() - start))
error_found = True
return rows_loaded
_TableInfoNamedtuple = namedtuple('TableInfoNamedtuple', [
# required
'ks_name', 'table_name', 'column_spec',
# optional
'options', 'insert_stmt',
# derived
'name', 'create_stmt'
])
class TableInfo(_TableInfoNamedtuple):
__slots__ = ()
def __new__(cls, ks_name, table_name, column_spec, options=None, insert_stmt=None):
name = ks_name + '.' + table_name
create_stmt = _get_create_table_statement(ks_name, table_name, column_spec, options)
self = super(TableInfo, cls).__new__(
cls,
# required
ks_name=ks_name, table_name=table_name, column_spec=column_spec,
# optional
options=options, insert_stmt=insert_stmt,
# derived
name=name, create_stmt=create_stmt
)
return self
def _set_cdc_on_table(session, table_name, value, ks_name=None):
"""
Uses <session> to set CDC to <value> on <ks_name>.<table_name>.
"""
table_string = ks_name + '.' + table_name if ks_name else table_name
value_string = 'true' if value else 'false'
stmt = 'ALTER TABLE ' + table_string + ' WITH CDC = ' + value_string
logger.debug(stmt)
session.execute(stmt)
def _get_set_cdc_func(session, ks_name, table_name):
"""
Close over a session, keyspace name, and table name and return a function
that takes enables CDC on that keyspace if its argument is truthy and
otherwise disables it.
"""
def set_cdc(value):
return _set_cdc_on_table(
session=session,
ks_name=ks_name, table_name=table_name,
value=value
)
return set_cdc
def _get_commitlog_files(node_path):
commitlog_dir = os.path.join(node_path, 'commitlogs')
return {
os.path.join(commitlog_dir, name)
for name in os.listdir(commitlog_dir)
}
def _get_cdc_raw_files(node_path, cdc_raw_dir_name='cdc_raw'):
commitlog_dir = os.path.join(node_path, cdc_raw_dir_name)
return {
os.path.join(commitlog_dir, name)
for name in os.listdir(commitlog_dir)
}
@since('3.8')
class TestCDC(Tester):
"""
@jira_ticket CASSANDRA-8844
@jira_ticket CASSANDRA-12148
Test the correctness of some features of CDC, Change Data Capture, which
provides a view of the commitlog on tables for which it is enabled.
"""
@pytest.fixture(autouse=True)
def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
fixture_dtest_setup.allow_log_errors = True
fixture_dtest_setup.ignore_log_patterns = (
# We expect to see this error in the logs when we reach CDC limit
r'Failed to apply mutation locally'
)
def _create_temp_dir(self, dir_name, verbose=True):
"""
Create a directory that will be deleted when this test class is torn
down.
"""
if verbose:
logger.debug('creating ' + dir_name)
try:
os.mkdir(dir_name)
except OSError as e:
if e.errno != errno.EEXIST:
logger.debug(dir_name + ' already exists. removing and recreating.')
shutil.rmtree(dir_name)
os.mkdir(dir_name)
else:
raise e
def debug_and_rmtree():
shutil.rmtree(dir_name)
logger.debug(dir_name + ' removed')
self.addCleanup(debug_and_rmtree)
def prepare(self, ks_name,
table_name=None, cdc_enabled_table=None,
gc_grace_seconds=None,
column_spec=None,
configuration_overrides=None,
table_id=None):
"""
Create a 1-node cluster, start it, create a keyspace, and if
<table_name>, create a table in that keyspace. If <cdc_enabled_table>,
that table is created with CDC enabled. If <column_spec>, use that
string to specify the schema of the table -- for example, a valid value
is 'a int PRIMARY KEY, b int'. The <configuration_overrides> is
treated as a dict-like object and passed to
self.cluster.set_configuration_options.
"""
config_defaults = {
'cdc_enabled': True,
# we want to be able to generate new segments quickly
'commitlog_segment_size_in_mb': 2,
}
if configuration_overrides is None:
configuration_overrides = {}
self.cluster.populate(1)
self.cluster.set_configuration_options(dict(config_defaults, **configuration_overrides))
self.cluster.start()
node = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node)
create_ks(session, ks_name, rf=1)
if table_name is not None:
assert cdc_enabled_table is not None, 'if creating a table in prepare, must specify whether or not CDC is enabled on it'
assert column_spec is not None, 'if creating a table in prepare, must specify its schema'
options = {}
if gc_grace_seconds is not None:
options['gc_grace_seconds'] = gc_grace_seconds
if table_id is not None:
options['id'] = table_id
if cdc_enabled_table:
options['cdc'] = 'true'
stmt = _get_create_table_statement(
ks_name, table_name, column_spec,
options=options
)
logger.debug(stmt)
session.execute(stmt)
return node, session
def _assert_cdc_data_readable_on_round_trip(self, start_with_cdc_enabled):
"""
Parameterized test asserting that data written to a table is still
readable after flipping the CDC flag on that table, then flipping it
again. Starts with CDC enabled if start_with_cdc_enabled, otherwise
starts with it disabled.
"""
ks_name, table_name = 'ks', 'tab'
sequence = [True, False, True] if start_with_cdc_enabled else [False, True, False]
start_enabled, alter_path = sequence[0], list(sequence[1:])
node, session = self.prepare(ks_name=ks_name, table_name=table_name,
cdc_enabled_table=start_enabled,
column_spec='a int PRIMARY KEY, b int')
set_cdc = _get_set_cdc_func(session=session, ks_name=ks_name, table_name=table_name)
insert_stmt = session.prepare('INSERT INTO ' + table_name + ' (a, b) VALUES (?, ?)')
# data = zip(list(range(1000)), list(range(1000)))
start = 0
stop = 1000
step = 1
data = [(n, min(n+step, stop)) for n in range(start, stop, step)]
execute_concurrent_with_args(session, insert_stmt, data)
# We need data to be in commitlogs, not sstables.
assert [] == list(node.get_sstables(ks_name, table_name))
for enable in alter_path:
set_cdc(enable)
assert_resultset_contains(session.execute('SELECT * FROM ' + table_name), data)
def test_cdc_enabled_data_readable_on_round_trip(self):
"""
Test that data is readable after an enabled->disabled->enabled round
trip.
"""
self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=True)
def test_cdc_disabled_data_readable_on_round_trip(self):
"""
Test that data is readable after an disabled->enabled->disabled round
trip.
"""
self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=False)
def test_non_cdc_segments_deleted_after_replay(self):
"""
Test that non-cdc segment files generated in previous runs are deleted
after replay.
"""
ks_name, table_name = 'ks', 'tab'
node, session = self.prepare(ks_name=ks_name, table_name=table_name,
cdc_enabled_table=True,
column_spec='a int PRIMARY KEY, b int')
old_files = _get_cdc_raw_files(node.get_path())
node.drain()
session.cluster.shutdown()
node.stop()
node.start(wait_for_binary_proto=True)
new_files = _get_cdc_raw_files(node.get_path())
assert len(old_files.intersection(new_files)) == 0
def test_insertion_and_commitlog_behavior_after_reaching_cdc_total_space(self):
"""
Test that C* behaves correctly when CDC tables have consumed all the
space available to them. In particular: after writing
cdc_total_space_in_mb MB into CDC commitlogs:
- CDC writes are rejected
- non-CDC writes are accepted
- on flush, CDC commitlogs are copied to cdc_raw
- on flush, non-CDC commitlogs are not copied to cdc_raw
This is a lot of behavior to validate in one test, but we do so to
avoid running multiple tests that each have to write enough data to fill
cdc_total_space_in_mb.
"""
ks_name = 'ks'
full_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='full_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'full_cdc_tab'),
options={'cdc': 'true'}
)
configuration_overrides = {
# Make CDC space as small as possible so we can fill it quickly.
'cdc_total_space_in_mb': 4,
}
node, session = self.prepare(
ks_name=ks_name,
configuration_overrides=configuration_overrides
)
session.execute(full_cdc_table_info.create_stmt)
# Later, we'll also make assertions about the behavior of non-CDC
# tables, so we create one here.
non_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='non_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'non_cdc_tab')
)
session.execute(non_cdc_table_info.create_stmt)
# We'll also make assertions about the behavior of CDC tables when
# other CDC tables have already filled the designated space for CDC
# commitlogs, so we create the second CDC table here.
empty_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='empty_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'empty_cdc_tab'),
options={'cdc': 'true'}
)
session.execute(empty_cdc_table_info.create_stmt)
# Here, we insert values into the first CDC table until we get a
# WriteFailure. This should happen once the CDC commitlogs take up
# cdc_total_space_in_mb (4MB, as configured above) or more.
logger.debug('flushing non-CDC commitlogs')
node.flush()
# Then, we insert rows into the CDC table until we can't anymore.
logger.debug('beginning data insert to fill CDC commitlogs')
rows_loaded = _write_to_cdc_write_failure(session, full_cdc_table_info.insert_stmt)
assert 0 < rows_loaded, ('No CDC rows inserted. This may happen when '
'cdc_total_space_in_mb > commitlog_segment_size_in_mb')
commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
commitlogs_size = size_of_files_in_dir(commitlog_dir)
logger.debug('Commitlog dir ({d}) is {b}B'.format(d=commitlog_dir, b=commitlogs_size))
# We should get a WriteFailure when trying to write to the CDC table
# that's filled the designated CDC space...
try:
session.execute(full_cdc_table_info.insert_stmt)
raise Exception("WriteFailure expected")
except WriteFailure:
pass
# or any CDC table.
try:
session.execute(empty_cdc_table_info.insert_stmt)
raise Exception("WriteFailure expected")
except WriteFailure:
pass
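# (These two manual try/except blocks are equivalent to the more idiomatic
# `with pytest.raises(WriteFailure): session.execute(...)` form; both simply
# assert that the execute call fails with WriteFailure.)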
# Now we test for behaviors of non-CDC tables when we've exceeded
# cdc_total_space_in_mb.
#
# First, we drain and save the names of all the new discarded CDC
# segments
node.drain()
session.cluster.shutdown()
node.stop()
node.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node)
pre_non_cdc_write_cdc_raw_segments = _get_cdc_raw_files(node.get_path())
# Snapshot the _cdc.idx file if > 4.0 for comparison at end
before_cdc_state = [] # init empty here to quiet PEP
if self.cluster.version() >= '4.0':
# Create ReplayData objects for each index file found in loading cluster
node1_path = os.path.join(node.get_path(), 'cdc_raw')
before_cdc_state = [ReplayData.load(node1_path, name)
for name in os.listdir(node1_path) if name.endswith('_cdc.idx')]
# save the names of all the commitlog segments written up to this
# point:
pre_non_cdc_write_segments = _get_commitlog_files(node.get_path())
# Check that writing to non-CDC tables succeeds even when writes to CDC
# tables are rejected:
non_cdc_prepared_insert = session.prepare(non_cdc_table_info.insert_stmt)
session.execute(non_cdc_prepared_insert, ()) # should not raise an exception
# Check the following property: any new commitlog segments written to
# after cdc_raw has reached its maximum configured size should not be
# moved to cdc_raw, on commitlog discard, because any such commitlog
# segments are written to non-CDC tables.
#
# First, write to non-cdc tables.
start, time_limit = time.time(), 600
rate_limited_debug = get_rate_limited_function(logger.debug, 5)
logger.debug('writing to non-cdc table')
# We write until we get a new commitlog segment.
while _get_commitlog_files(node.get_path()) <= pre_non_cdc_write_segments:
elapsed = time.time() - start
rate_limited_debug(' non-cdc load step has lasted {s:.2f}s'.format(s=elapsed))
assert elapsed <= time_limit, "It's been over a {s}s and we haven't written a new " \
"commitlog segment. Something is wrong.".format(s=time_limit)
execute_concurrent(
session,
((non_cdc_prepared_insert, ()) for _ in range(1000)),
concurrency=500,
raise_on_first_error=True,
)
# Finally, we check that draining doesn't move any new segments to cdc_raw:
node.drain()
session.cluster.shutdown()
if self.cluster.version() < '4.0':
assert pre_non_cdc_write_cdc_raw_segments == _get_cdc_raw_files(node.get_path())
else:
# Create ReplayData objects for each index file found in loading cluster
node2_path = os.path.join(node.get_path(), 'cdc_raw')
after_cdc_state = [ReplayData.load(node2_path, name)
for name in os.listdir(node2_path) if name.endswith('_cdc.idx')]
# Confirm all indexes in 1st are accounted for and match corresponding entry in 2nd.
found = True
for idx in before_cdc_state:
idx_found = False
for idx_two in after_cdc_state:
if compare_replay_data(idx, idx_two):
idx_found = True
if not idx_found:
found = False
break
if not found:
self._fail_and_print_sets(before_cdc_state, after_cdc_state,
'Found CDC index in before not matched in after (non-CDC write test)')
# Now we confirm we don't have anything that showed up in 2nd not accounted for in 1st
orphan_found = False
for idx_two in after_cdc_state:
index_found = False
for idx in before_cdc_state:
if compare_replay_data(idx_two, idx):
index_found = True
if not index_found:
orphan_found = True
break
if orphan_found:
self._fail_and_print_sets(before_cdc_state, after_cdc_state,
'Found orphaned index file in after CDC state not in former.')
def _fail_and_print_sets(self, rd_one, rd_two, msg):
print('Set One:')
for idx in rd_one:
print(' {},{},{},{}'.format(idx.name, idx.completed, idx.offset, idx.log_name))
print('Set Two:')
for idx_two in rd_two:
print(' {},{},{},{}'.format(idx_two.name, idx_two.completed, idx_two.offset, idx_two.log_name))
pytest.fail(msg)
def _init_new_loading_node(self, ks_name, create_stmt, use_thrift=False):
loading_node = Node(
name='node2',
cluster=self.cluster,
auto_bootstrap=False,
thrift_interface=('127.0.0.2', 9160) if use_thrift else None,
storage_interface=('127.0.0.2', 7000),
jmx_port='7400',
remote_debug_port='0',
initial_token=None,
binary_interface=('127.0.0.2', 9042)
)
logger.debug('adding node')
self.cluster.add(loading_node, is_seed=True, data_center="dc1")
logger.debug('starting new node')
loading_node.start(wait_for_binary_proto=120)
logger.debug('recreating ks and table')
loading_session = self.patient_exclusive_cql_connection(loading_node)
create_ks(loading_session, ks_name, rf=1)
logger.debug('creating new table')
loading_session.execute(create_stmt)
logger.debug('stopping new node')
loading_session.cluster.shutdown()
loading_node.stop()
return loading_node
def test_cdc_data_available_in_cdc_raw(self):
ks_name = 'ks'
# First, create a new node just for data generation.
generation_node, generation_session = self.prepare(ks_name=ks_name)
cdc_table_info = TableInfo(
ks_name=ks_name, table_name='cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'cdc_tab'),
options={
'cdc': 'true',
# give table an explicit id so when we create it again it's the
# same table and we can replay into it
'id': uuid.uuid4()
}
)
# Write until we get a new CL segment to avoid replaying initialization
# mutations from this node's startup into system tables in the other
# node. See CASSANDRA-11811.
advance_to_next_cl_segment(
session=generation_session,
commitlog_dir=os.path.join(generation_node.get_path(), 'commitlogs')
)
generation_session.execute(cdc_table_info.create_stmt)
# insert 10000 rows
inserted_rows = _insert_rows(generation_session, cdc_table_info.name, cdc_table_info.insert_stmt,
repeat((), 10000))
# drain the node to guarantee all cl segments will be recycled
logger.debug('draining')
generation_node.drain()
logger.debug('stopping')
# stop the node and clean up all sessions attached to it
generation_session.cluster.shutdown()
generation_node.stop()
# We can rely on the existing _cdc.idx files to determine which .log files contain cdc data.
source_path = os.path.join(generation_node.get_path(), 'cdc_raw')
source_cdc_indexes = {ReplayData.load(source_path, name)
for name in os.listdir(source_path) if name.endswith('_cdc.idx')}
# assertNotEqual(source_cdc_indexes, {})
assert source_cdc_indexes != set()
# create a new node to use for cdc_raw cl segment replay
loading_node = self._init_new_loading_node(ks_name, cdc_table_info.create_stmt, self.cluster.version() < '4')
# move cdc_raw contents to commitlog directories, then start the
# node again to trigger commitlog replay, which should replay the
# cdc_raw files we moved to commitlogs into memtables.
logger.debug('moving cdc_raw and restarting node')
_move_commitlog_segments(
os.path.join(generation_node.get_path(), 'cdc_raw'),
os.path.join(loading_node.get_path(), 'commitlogs')
)
loading_node.start(wait_for_binary_proto=120)
logger.debug('node successfully started; waiting on log replay')
loading_node.grep_log('Log replay complete')
logger.debug('log replay complete')
# final assertions
validation_session = self.patient_exclusive_cql_connection(loading_node)
data_in_cdc_table_after_restart = rows_to_list(
validation_session.execute('SELECT * FROM ' + cdc_table_info.name)
)
logger.debug('found {cdc} values in CDC table'.format(
cdc=len(data_in_cdc_table_after_restart)
))
# Then we assert that the CDC data that we expect to be there is there.
# All data that was in CDC tables should have been copied to cdc_raw,
# then used in commitlog replay, so it should be back in the cluster.
assert (inserted_rows == data_in_cdc_table_after_restart), 'not all expected data selected'
if self.cluster.version() >= '4.0':
# Create ReplayData objects for each index file found in loading cluster
loading_path = os.path.join(loading_node.get_path(), 'cdc_raw')
dest_cdc_indexes = [ReplayData.load(loading_path, name)
for name in os.listdir(loading_path) if name.endswith('_cdc.idx')]
# Compare source replay data to dest to ensure replay process created both hard links and index files.
for srd in source_cdc_indexes:
# Confirm both log and index are in dest
assert os.path.isfile(os.path.join(loading_path, srd.idx_name))
assert os.path.isfile(os.path.join(loading_path, srd.log_name))
# Find dest ReplayData that corresponds to the source (should be exactly 1)
corresponding_dest_replay_datae = [x for x in dest_cdc_indexes
if srd.idx_name == x.idx_name]
assert_length_equal(corresponding_dest_replay_datae, 1)
drd = corresponding_dest_replay_datae[0]
# We can't compare equality on offsets since replay uses the raw file length as the written
# cdc offset. We *can*, however, confirm that the offset in the replayed file is >=
# the source file, ensuring clients are signaled to replay at least all the data in the
# log.
assert drd.offset >= srd.offset
# Confirm completed flag is the same in both
assert srd.completed == drd.completed
# Confirm that the relationship between index files on the source
# and destination looks like we expect.
# First, grab the mapping between the two, make sure it's a 1-1
# mapping, and transform the dict to reflect that:
src_to_dest_idx_map = {
src_rd: [dest_rd for dest_rd in dest_cdc_indexes
if dest_rd.idx_name == src_rd.idx_name]
for src_rd in source_cdc_indexes
}
for src_rd, dest_rds in src_to_dest_idx_map.items():
assert_length_equal(dest_rds, 1)
src_to_dest_idx_map[src_rd] = dest_rds[0]
# All offsets in idx files that were copied should be >0 on the
# destination node.
assert (
0 not in {i.offset for i in src_to_dest_idx_map.values()}),\
('Found index offsets == 0 in an index file on the '
'destination node that corresponds to an index file on the '
'source node:\n'
'{}').format(pformat(src_to_dest_idx_map))
# Offsets of all shared indexes should be >= on the destination
# than on the source.
for src_rd, dest_rd in src_to_dest_idx_map.items():
assert dest_rd.offset >= src_rd.offset
src_to_dest_idx_map = {
src_rd: [dest_rd for dest_rd in dest_cdc_indexes
if dest_rd.idx_name == src_rd.idx_name]
for src_rd in source_cdc_indexes
}
for k, v in src_to_dest_idx_map.items():
assert_length_equal(v, 1)
assert v[0].offset >= k.offset
def compare_replay_data(rd_one, rd_two):
return rd_one.idx_name == rd_two.idx_name and \
rd_one.completed == rd_two.completed and \
rd_one.offset == rd_two.offset and \
rd_one.log_name == rd_two.log_name
class ReplayData(namedtuple('ReplayData', ['idx_name', 'completed', 'offset', 'log_name'])):
"""
Replay data class containing data from a _cdc.idx file. Build one with the load method.
"""
@classmethod
def load(cls, path, name):
assert '_cdc' in name, 'expected to find _cdc in passed in index name. Did not: ' + name
with open(os.path.join(path, name), 'r') as f:
offset, completed = [line.strip() for line in f.readlines()]
return cls(
idx_name=name,
completed=completed,
offset=int(offset),
log_name=re.sub('_cdc.idx', '.log', name)
)
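# Illustrative sketch (not part of the original test suite): ReplayData.load
# above parses a two-line _cdc.idx file -- an integer offset followed by a
# completed flag. The helper below writes a synthetic index file in that
# assumed layout and parses it back; the file name and the 'COMPLETED' flag
# value are assumptions used only to show the expected shape.
def _write_and_load_example_cdc_idx(path, name='CommitLog-7-123_cdc.idx'):
    """Write a synthetic _cdc.idx file and round-trip it through ReplayData.load."""
    with open(os.path.join(path, name), 'w') as f:
        f.write('4096\n')       # offset into the corresponding commitlog segment
        f.write('COMPLETED\n')  # completed flag (value here is an assumption)
    return ReplayData.load(path, name)  # ReplayData(idx_name=..., offset=4096, ...)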
|
create_group
|
Create new group in account.
:param api: api fixture
:param account: account fixture
:yields: create_group function
|
"""Account, user fixtures."""
import json
import logging
from time import monotonic, sleep
from typing import List, NamedTuple, Optional, Tuple
import pytest
from box import Box
from dynaconf import settings
from ambra_sdk.exceptions.service import DuplicateName, NotEmpty
from ambra_sdk.models import Group
from ambra_sdk.service.filtering import Filter, FilterCondition
from ambra_sdk.service.query import QueryO, QueryOPF
logger = logging.getLogger(__name__)
@pytest.fixture(scope='module')
def storage_cluster(api, request):
"""Specific storage cluster.
:param api: api
:param request: pytest request
:raises RuntimeError: Unknown cluster name
:return: cluster box
"""
cluster_name = request.param
cluster = None
if cluster_name != 'DEFAULT':
cluster = QueryOPF(
api=api,
url='/cluster/list',
request_data={},
errors_mapping={},
paginated_field='clusters',
required_sid=True,
).filter_by(Filter(
'name',
FilterCondition.equals,
cluster_name,
)).first()
if cluster is None:
raise RuntimeError(
'Unknown cluster name {name}'.format(name=cluster_name),
)
return cluster
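# Usage note (sketch, not part of the original module): storage_cluster reads
# request.param, so tests pick a cluster via pytest's indirect parametrization,
# for example:
#
#     @pytest.mark.parametrize('storage_cluster', ['DEFAULT'], indirect=True)
#     def test_on_default_cluster(account, storage_cluster):
#         ...
#
# 'DEFAULT' is treated above as "no dedicated cluster"; any other value must
# match a cluster name returned by /cluster/list.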
class UserParams(NamedTuple):
"""User params."""
account: Box
user: Box
class GroupParams(NamedTuple):
"""Group params."""
uuid: str
namespace_id: str
name: str
def create_account(api, account_name: str) -> Tuple[Box, Box]:
"""Create new account.
:param api: api
:param account_name: account name
:raises RuntimeError: Cant find account
:return: user params
"""
# If account exists - raise DuplicateName error
QueryO(
api=api,
url='/account/add',
request_data={
'name': account_name,
},
errors_mapping={
'DUPLICATE_NAME': DuplicateName(),
},
required_sid=True,
).get()
account = api \
.Account \
.list() \
.filter_by(
Filter(
'name',
FilterCondition.equals,
account_name,
),
).first()
if account is None:
raise RuntimeError('Cant find test account')
# set role permissions
admin_role = api \
.Role \
.list(account_id=account.uuid) \
.filter_by(
Filter(
'name',
FilterCondition.equals,
'Administrator',
),
).first()
if admin_role is None:
raise RuntimeError('Cant find admin role')
api.Role.set(
uuid=admin_role.uuid,
permissions=json.dumps(
{
'study_delete': 1,
'study_duplicate': 1,
'study_split': 1,
'study_merge': 1,
'study_delete_image': 1,
},
),
).get()
user = api.User.get(account_id=account.uuid).get()
logger.info('Created account %s', account.name)
return (account, user)
def account_studies(api, account) -> List[Box]:
"""List of account studies.
:param api: api
:param account: account
:return: list of studies
"""
account_namespaces = [account.namespace_id]
group_namespaces = [
group.namespace_id for group in
api.Group.list(account_id=account.uuid).only(Group.namespace_id).all()
]
account_namespaces.extend(group_namespaces)
# Method study list does not support in_condition filtering for namespace !
acc_studies = []
for account_namespace in account_namespaces:
studies = api \
.Study \
.list() \
.filter_by(
Filter(
field_name='phi_namespace',
condition=FilterCondition.equals,
value=account_namespace,
),
).all()
acc_studies.extend(list(studies))
return acc_studies
def delete_account(api, account) -> None:
"""Delete account.
:param api: api
:param account: account
:raises RuntimeError: if the account has undeleted studies
"""
try:
QueryO(
api=api,
url='/account/delete/',
request_data={
'uuid': account.uuid,
},
errors_mapping={
'NOT_EMPTY': NotEmpty(),
},
required_sid=True,
).get()
except NotEmpty:
acc_studies = account_studies(api, account)
raise RuntimeError(
'Account has undeleted studies:\n{studies}'.format(
studies='\n'.join(
[
str((study.uuid, study.study_uid))
for study in acc_studies
],
),
),
)
def clear_studies(api, account):
"""Delete account studies.
:param api: api
:param account: account
"""
account_namespaces = [account.namespace_id]
group_namespaces = [
group.namespace_id for group in
api.Group.list(account_id=account.uuid).only(Group.namespace_id).all()
]
account_namespaces.extend(group_namespaces)
# Method study list does not support in_condition filtering for namespace !
# So delete studies in loop
for account_namespace in account_namespaces:
studies = api \
.Study \
.list() \
.filter_by(
Filter(
field_name='phi_namespace',
condition=FilterCondition.equals,
value=account_namespace,
),
).all()
for study in studies:
study_uid = study.uuid
logger.error('Remove undeleted study %s', study_uid)
api.Study.delete(uuid=study_uid).get()
@pytest.fixture(scope='module') # NOQA:WPS210,WPS231
def account(api, storage_cluster):
"""Get account.
:param api: ambra api
:param storage_cluster: storage cluster
:yields: test account
:raises RuntimeError: if the account to delete still has studies
:raises TimeoutError: if waiting for account deletion times out
"""
account_name = settings.TEST_ACCOUNT_NAME
if storage_cluster:
account_name = '{account}_{cluster}'.format(
account=account_name,
cluster=storage_cluster.name,
)
try:
account, user = create_account(api, account_name)
except DuplicateName:
logger.error('Duplicated account: %s', account_name)
account = api \
.Account \
.list() \
.filter_by(
Filter(
'name',
FilterCondition.equals,
account_name,
),
).first()
if account is None:
raise RuntimeError('Account is duplicated but does not exist')
clear_studies(api, account)
delete_account(api, account)
account, user = create_account(api, account_name)
if storage_cluster is not None:
QueryO(
api=api,
url='/cluster/account/bind',
request_data={
'account_id': account.uuid,
'cluster_id': storage_cluster.uuid,
},
errors_mapping={},
required_sid=True,
).get()
logger.info(
'Bind account to storage cluster {name}'.format(
name=storage_cluster.name,
),
)
yield UserParams(
account=account,
user=user,
)
delete_account(api, account)
start = monotonic()
while True:
if monotonic() - start >= settings.API['account_deletion_timeout']:
raise TimeoutError('Account still exists')
account = api \
.Account \
.list() \
.filter_by(
Filter(
'name',
FilterCondition.equals,
account_name,
),
).first()
if account is None:
return
sleep(settings.API['account_deletion_check_interval'])
# MASKED: create_group function (lines 310-348)
|
@pytest.fixture
def create_group(api, account):
"""Create new group in account.
:param api: api fixture
:param account: account fixture
:yields: create_group function
"""
groups = []
group_counter = 0
def _create_group(name: Optional[str] = None):
nonlocal group_counter
group_counter += 1
if name is None:
name = 'SDK_TEST_GROUP_{gnum}'.format(gnum=group_counter)
account_id = account.account.uuid
response = api.Group.add(
account_id=account_id,
name=name,
).get()
group = GroupParams(
uuid=response.uuid,
namespace_id=response.namespace_id,
name=name,
)
groups.append(group)
# add account user to the group
api.Group.user_add(
uuid=group.uuid,
user_id=account.user.uuid,
).get()
return group
yield _create_group
for group in groups:
api.Group.delete(uuid=group.uuid).get()
| 310 | 348 |
"""Account, user fixtures."""
import json
import logging
from time import monotonic, sleep
from typing import List, NamedTuple, Optional, Tuple
import pytest
from box import Box
from dynaconf import settings
from ambra_sdk.exceptions.service import DuplicateName, NotEmpty
from ambra_sdk.models import Group
from ambra_sdk.service.filtering import Filter, FilterCondition
from ambra_sdk.service.query import QueryO, QueryOPF
logger = logging.getLogger(__name__)
@pytest.fixture(scope='module')
def storage_cluster(api, request):
"""Specific storage cluster.
:param api: api
:param request: pytest request
:raises RuntimeError: Unknown cluster name
:return: cluster box
"""
cluster_name = request.param
cluster = None
if cluster_name != 'DEFAULT':
cluster = QueryOPF(
api=api,
url='/cluster/list',
request_data={},
errors_mapping={},
paginated_field='clusters',
required_sid=True,
).filter_by(Filter(
'name',
FilterCondition.equals,
cluster_name,
)).first()
if cluster is None:
raise RuntimeError(
'Unknown cluster name {name}'.format(name=cluster_name),
)
return cluster
class UserParams(NamedTuple):
"""User params."""
account: Box
user: Box
class GroupParams(NamedTuple):
"""Group params."""
uuid: str
namespace_id: str
name: str
def create_account(api, account_name: str) -> Tuple[Box, Box]:
"""Create new account.
:param api: api
:param account_name: account name
:raises RuntimeError: Cant find account
:return: user params
"""
# If account exists - raise DuplicateName error
QueryO(
api=api,
url='/account/add',
request_data={
'name': account_name,
},
errors_mapping={
'DUPLICATE_NAME': DuplicateName(),
},
required_sid=True,
).get()
account = api \
.Account \
.list() \
.filter_by(
Filter(
'name',
FilterCondition.equals,
account_name,
),
).first()
if account is None:
raise RuntimeError('Cant find test account')
# set role permissions
admin_role = api \
.Role \
.list(account_id=account.uuid) \
.filter_by(
Filter(
'name',
FilterCondition.equals,
'Administrator',
),
).first()
if admin_role is None:
raise RuntimeError('Cant find admin role')
api.Role.set(
uuid=admin_role.uuid,
permissions=json.dumps(
{
'study_delete': 1,
'study_duplicate': 1,
'study_split': 1,
'study_merge': 1,
'study_delete_image': 1,
},
),
).get()
user = api.User.get(account_id=account.uuid).get()
logger.info('Created account %s', account.name)
return (account, user)
def account_studies(api, account) -> List[Box]:
"""List of account studies.
:param api: api
:param account: account
:return: list of studies
"""
account_namespaces = [account.namespace_id]
group_namespaces = [
group.namespace_id for group in
api.Group.list(account_id=account.uuid).only(Group.namespace_id).all()
]
account_namespaces.extend(group_namespaces)
# Method study list does not support in_condition filtering for namespace !
acc_studies = []
for account_namespace in account_namespaces:
studies = api \
.Study \
.list() \
.filter_by(
Filter(
field_name='phi_namespace',
condition=FilterCondition.equals,
value=account_namespace,
),
).all()
acc_studies.extend(list(studies))
return acc_studies
def delete_account(api, account) -> None:
"""Delete account.
:param api: api
:param account: account
:raises RuntimeError: if the account has undeleted studies
"""
try:
QueryO(
api=api,
url='/account/delete/',
request_data={
'uuid': account.uuid,
},
errors_mapping={
'NOT_EMPTY': NotEmpty(),
},
required_sid=True,
).get()
except NotEmpty:
acc_studies = account_studies(api, account)
raise RuntimeError(
'Account has undeleted studies:\n{studies}'.format(
studies='\n'.join(
[
str((study.uuid, study.study_uid))
for study in acc_studies
],
),
),
)
def clear_studies(api, account):
"""Delete account studies.
:param api: api
:param account: account
"""
account_namespaces = [account.namespace_id]
group_namespaces = [
group.namespace_id for group in
api.Group.list(account_id=account.uuid).only(Group.namespace_id).all()
]
account_namespaces.extend(group_namespaces)
# Method study list does not support in_condition filtering for namespace !
# So delete studies in loop
for account_namespace in account_namespaces:
studies = api \
.Study \
.list() \
.filter_by(
Filter(
field_name='phi_namespace',
condition=FilterCondition.equals,
value=account_namespace,
),
).all()
for study in studies:
study_uid = study.uuid
logger.error('Remove undeleted study %s', study_uid)
api.Study.delete(uuid=study_uid).get()
@pytest.fixture(scope='module') # NOQA:WPS210,WPS231
def account(api, storage_cluster):
"""Get account.
:param api: ambra api
:param storage_cluster: storage cluster
:yields: test account
:raises RuntimeError: if the account to delete still has studies
:raises TimeoutError: if waiting for account deletion times out
"""
account_name = settings.TEST_ACCOUNT_NAME
if storage_cluster:
account_name = '{account}_{cluster}'.format(
account=account_name,
cluster=storage_cluster.name,
)
try:
account, user = create_account(api, account_name)
except DuplicateName:
logger.error('Duplicated account: %s', account_name)
account = api \
.Account \
.list() \
.filter_by(
Filter(
'name',
FilterCondition.equals,
account_name,
),
).first()
if account is None:
raise RuntimeError('Account is duplicated but does not exist')
clear_studies(api, account)
delete_account(api, account)
account, user = create_account(api, account_name)
if storage_cluster is not None:
QueryO(
api=api,
url='/cluster/account/bind',
request_data={
'account_id': account.uuid,
'cluster_id': storage_cluster.uuid,
},
errors_mapping={},
required_sid=True,
).get()
logger.info(
'Bind account to storage cluster {name}'.format(
name=storage_cluster.name,
),
)
yield UserParams(
account=account,
user=user,
)
delete_account(api, account)
start = monotonic()
while True:
if monotonic() - start >= settings.API['account_deletion_timeout']:
raise TimeoutError('Account still exists')
account = api \
.Account \
.list() \
.filter_by(
Filter(
'name',
FilterCondition.equals,
account_name,
),
).first()
if account is None:
return
sleep(settings.API['account_deletion_check_interval'])
@pytest.fixture
def create_group(api, account):
"""Create new group in account.
:param api: api fixture
:param account: account fixture
:yields: create_group function
"""
groups = []
group_counter = 0
def _create_group(name: Optional[str] = None):
nonlocal group_counter
group_counter += 1
if name is None:
name = 'SDK_TEST_GROUP_{gnum}'.format(gnum=group_counter)
account_id = account.account.uuid
response = api.Group.add(
account_id=account_id,
name=name,
).get()
group = GroupParams(
uuid=response.uuid,
namespace_id=response.namespace_id,
name=name,
)
groups.append(group)
# add account user to the group
api.Group.user_add(
uuid=group.uuid,
user_id=account.user.uuid,
).get()
return group
yield _create_group
for group in groups:
api.Group.delete(uuid=group.uuid).get()
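# Usage sketch (not part of this fixtures module): a test consuming the
# create_group fixture defined above. The test name is illustrative; the
# assertions touch only the GroupParams fields the fixture populates.
#
#     def test_create_group_returns_group_params(create_group):
#         group = create_group()  # auto-named 'SDK_TEST_GROUP_1'
#         assert group.uuid and group.namespace_id
#         assert group.name.startswith('SDK_TEST_GROUP_')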
|
encode
|
Encodes the CID using a given multibase. If :obj:`None` is given,
the CID's own multibase is used by default.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.encode() # default: cid.base
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
>>> cid.encode("base32")
'bafkreidon73zkcrwdb5iafqtijxildoonbwnpv7dyd6ef3qdgads2jc4su'
:param base: the multibase to be used for encoding
:type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*
:raises KeyError: see :meth:`multiformats.multibase.Multibase.encode`
|
"""
Implementation of the `CID spec <https://github.com/multiformats/cid>`_.
This module differs from other modules of :mod:`~multiformats`, in that the functionality is completely
encapsulated by a single class :class:`CID`, which is imported from top level instead
of the module itself:
>>> from multiformats import CID
"""
from typing import Any, cast, FrozenSet, Tuple, Type, TypeVar, Union
from typing_extensions import Literal, Final
from typing_validation import validate
from bases import base58btc
from multiformats import varint, multicodec, multibase, multihash
from multiformats.multicodec import Multicodec
from multiformats.multibase import Multibase
from multiformats.multihash import Multihash, _validate_raw_digest_size
from multiformats.varint import BytesLike, byteslike
_CIDSubclass = TypeVar("_CIDSubclass", bound="CID")
CIDVersion = Literal[0, 1]
CIDVersionNumbers: Final[FrozenSet[int]] = frozenset({0, 1})
def _binary_cid_from_str(cid: str) -> Tuple[bytes, Multibase]:
if len(cid) == 46 and cid.startswith("Qm"):
# CIDv0 to be decoded as base58btc
return base58btc.decode(cid), multibase.get("base58btc")
mb, b = multibase.decode_raw(cid)
if b[0] == 0x12:
# CIDv0 may not be multibase encoded (0x12 is the first byte of sha2-256 multihashes)
# CIDv18 (first byte 18=0x12) will be skipped to prevent ambiguity
raise ValueError("CIDv0 may not be multibase encoded (found multibase encoded bytes starting with 0x12).")
return b, mb
def _CID_validate_multibase(base: Union[str, Multibase]) -> Multibase:
if isinstance(base, str):
base = multibase.get(base)
else:
multibase.validate_multibase(base)
return base
def _CID_validate_multicodec(codec: Union[str, int, Multicodec]) -> Multicodec:
if isinstance(codec, str):
codec = multicodec.get(codec)
elif isinstance(codec, int):
codec = multicodec.get(code=codec)
else:
multicodec.validate_multicodec(codec)
return codec
def _CID_validate_multihash(hashfun: Union[str, int, Multihash]) -> Multihash:
if isinstance(hashfun, str):
hashfun = multihash.get(hashfun)
elif isinstance(hashfun, int):
hashfun = multihash.get(code=hashfun)
else:
pass
return hashfun
def _CID_validate_raw_digest(raw_digest: Union[str, BytesLike], hashfun: Multihash) -> bytes:
if isinstance(raw_digest, str):
raw_digest = bytes.fromhex(raw_digest)
else:
validate(raw_digest, BytesLike)
if not isinstance(raw_digest, bytes):
raw_digest = bytes(raw_digest)
_, max_digest_size = hashfun.implementation
_validate_raw_digest_size(hashfun.name, raw_digest, max_digest_size)
return raw_digest
def _CID_validate_multihash_digest(digest: Union[str, BytesLike]) -> Tuple[Multihash, bytes]:
if isinstance(digest, str):
digest = bytes.fromhex(digest)
raw_digest: BytesLike
code, raw_digest = multihash.unwrap_raw(digest)
hashfun = _CID_validate_multihash(code)
raw_digest = _CID_validate_raw_digest(raw_digest, hashfun)
return hashfun, raw_digest
def _CID_validate_version(version: int, base: Multibase, codec: Multicodec, hashfun: Multihash) -> int:
if version in (2, 3):
raise ValueError("CID versions 2 and 3 are reserved for future use.")
if version not in (0, 1):
raise ValueError(f"CID version {version} is not allowed.")
if version == 0:
if base.name != 'base58btc':
raise ValueError(f"CIDv0 multibase must be 'base58btc', found {repr(base.name)} instead.")
if codec.name != "dag-pb":
raise ValueError(f"CIDv0 multicodec must be 'dag-pb', found {repr(codec.name)} instead.")
if hashfun.name != "sha2-256":
raise ValueError(f"CIDv0 multihash must be 'sha2-256', found {repr(hashfun.name)} instead.")
return version
class CID:
"""
Container class for `Content IDentifiers <https://github.com/multiformats/cid>`_.
CIDs can be explicitly instantiated by passing multibase, CID version, multicodec and multihash digest to the constructor:
>>> cid = CID("base58btc", 1, "raw",
... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> str(cid)
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
Alternatively, a pair of multihash codec and raw hash digest can be passed in lieu of the multihash digest:
>>> raw_digest = bytes.fromhex(
... "6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> cid = CID("base58btc", 1, "raw", ("sha2-256", raw_digest))
>>> str(cid)
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
The multihash digest and raw digest values can be passed either as :obj:`bytes`-like objects or as the corresponding hex strings:
>>> isinstance(raw_digest, bytes)
True
>>> raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
Note: the hex strings are not multibase encoded.
Calling :obj:`bytes` on an instance of this class returns its binary representation, as a :obj:`bytes` object:
>>> cid = CID("base58btc", 1, "raw",
... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
>>> bytes(cid).hex()
'015512206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
#^^ 0x01 = CIDv1
# ^^ 0x55 = 'raw' codec
:param base: default multibase to use when encoding this CID
:type base: :obj:`str` or :class:`~multiformats.multibase.Multibase`
:param version: the CID version
:type version: 0 or 1
:param codec: the content multicodec
:type codec: :obj:`str`, :obj:`int` or :class:`~multiformats.multicodec.Multicodec`
:param digest: the content multihash digest, or a pair of multihash codec and raw content digest
:type digest: see below
The ``digest`` parameter can be specified in the following ways:
- as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex`
- as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly
- as a pair ``(multihash_codec, raw_digest)`` of a multihash and raw hash digest, which are used to produce a multihash digest
via the :meth:`~multiformats.multihash.Multihash.wrap` method.
If ``digest`` is specified by a pair, the ``multihash_codec`` value can be specified in the following ways:
- by multihash multicodec name, as a :obj:`str`
- by multihash multicodec code, as a :obj:`int`
- as a :class:`~multiformats.multihash.Multihash` object
If ``digest`` is specified by a pair, the ``raw_digest`` value can be specified in the following ways:
- as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex`
- as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly
:raises ValueError: if the CID version is unsupported
:raises ValueError: if version is 0 but base is not 'base58btc' or codec is not 'dag-pb'
:raises KeyError: if the multibase, multicodec or multihash are unknown
"""
_base: Multibase
_version: CIDVersion
_codec: Multicodec
_hashfun: Multihash
_digest: bytes
__slots__ = ("__weakref__", "_base", "_version", "_codec", "_hashfun", "_digest")
def __new__(cls: Type[_CIDSubclass],
base: Union[str, Multibase],
version: int,
codec: Union[str, int, Multicodec],
digest: Union[str, BytesLike, Tuple[Union[str, int, Multihash], Union[str, BytesLike]]],
) -> _CIDSubclass:
# pylint: disable = too-many-arguments
base = _CID_validate_multibase(base)
codec = _CID_validate_multicodec(codec)
raw_digest: Union[str, bytes]
hashfun: Union[str, int, Multihash]
if isinstance(digest, (str,)+byteslike):
hashfun, raw_digest = _CID_validate_multihash_digest(digest)
else:
validate(digest, Tuple[Union[str, int, Multihash], Union[str, BytesLike]])
hashfun, raw_digest = digest
hashfun = _CID_validate_multihash(hashfun)
raw_digest = _CID_validate_raw_digest(raw_digest, hashfun)
version = _CID_validate_version(version, base, codec, hashfun)
if isinstance(digest, bytes):
return CID._new_instance(cls, base, version, codec, hashfun, digest)
return CID._new_instance(cls, base, version, codec, hashfun, (hashfun, raw_digest))
@staticmethod
def _new_instance(CID_subclass: Type[_CIDSubclass],
base: Multibase,
version: int,
codec: Multicodec,
hashfun: Multihash,
digest: Union[bytes, Tuple[Multihash, bytes]],
) -> _CIDSubclass:
# pylint: disable = too-many-arguments
instance: _CIDSubclass = super().__new__(CID_subclass)
instance._base = base
assert version in (0, 1)
instance._version = cast(Literal[0, 1], version)
instance._codec = codec
instance._hashfun = hashfun
if isinstance(digest, bytes):
instance._digest = digest
elif isinstance(digest, byteslike):
instance._digest = bytes(digest)
else:
_hashfun, raw_digest = digest
if not isinstance(raw_digest, bytes):
raw_digest = bytes(raw_digest)
assert _hashfun == hashfun, "You passed different multihashes to a _new_instance call with digest as a pair."
instance._digest = hashfun.wrap(raw_digest)
return instance
@property
def version(self) -> CIDVersion:
"""
CID version.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.version
1
"""
return self._version
@property
def base(self) -> Multibase:
"""
Multibase used to encode the CID:
- if a CIDv1 was decoded from a multibase-encoded string, the encoding multibase is used
- if a CIDv1 was decoded from a bytestring, the 'base58btc' multibase is used
- for a CIDv0, 'base58btc' is always used
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.base
Multibase(name='base58btc', code='z',
status='default', description='base58 bitcoin')
"""
return self._base
@property
def codec(self) -> Multicodec:
"""
Codec that the multihash digest refers to.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.codec
Multicodec(name='raw', tag='ipld', code='0x55',
status='permanent', description='raw binary')
"""
return self._codec
@property
def hashfun(self) -> Multihash:
"""
Multihash used to produce the multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.hashfun
Multicodec(name='sha2-256', tag='multihash', code='0x12',
status='permanent', description='')
"""
return self._hashfun
@property
def digest(self) -> bytes:
"""
Multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.digest.hex()
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
"""
return self._digest
@property
def raw_digest(self) -> bytes:
"""
Raw hash digest, decoded from the multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
"""
return multihash.unwrap(self._digest)
@property
def human_readable(self) -> str:
"""
Human-readable representation of the CID.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.human_readable
'base58btc - cidv1 - raw - (sha2-256 : 256 : 6E6FF7950A36187A801613426E858DCE686CD7D7E3C0FC42EE0330072D245C95)'
"""
raw_digest = self.raw_digest
hashfun_str = f"({self.hashfun.name} : {len(raw_digest)*8} : {raw_digest.hex().upper()})"
return f"{self.base.name} - cidv{self.version} - {self.codec.name} - {hashfun_str}"
# MASKED: encode function (lines 346-377)
def set(self, *,
base: Union[None, str, Multibase] = None,
version: Union[None, int] = None,
codec: Union[None, str, int, Multicodec] = None
) -> "CID":
"""
Returns a new CID obtained by setting new values for one or more of:
``base``, ``version``, or ``codec``.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(base="base32")
CID('base32', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(codec="dag-cbor")
CID('base58btc', 1, 'dag-cbor',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0, codec="dag-pb")
CID('base58btc', 0, 'dag-pb',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
# Note: 'CID.set' returns new instances,
# the original 'cid' instance is unchanged
If setting ``version`` to 0, ``base`` must be 'base58btc' and ``codec`` must be 'dag-pb'.
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0, codec="dag-pb")
CID('base58btc', 0, 'dag-pb',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0)
ValueError: CIDv0 multicodec must be 'dag-pb', found 'raw' instead.
>>> cid.set(version=0, codec="dag-pb", base="base32")
ValueError: CIDv0 multibase must be 'base58btc', found 'base32' instead
:param base: the new CID multibase, or :obj:`None` if multibase unchanged
:type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*
:param version: the new CID version, or :obj:`None` if version unchanged
:type version: :obj:`None`, 0 or 1, *optional*
:param codec: the new content multicodec, or :obj:`None` if multicodec unchanged
:type codec: :obj:`None`, :obj:`str` or :class:`~multiformats.multicodec.Multicodec`, *optional*
:raises KeyError: if the multibase or multicodec are unknown
"""
hashfun = self.hashfun
digest = self.digest
if base is not None and base not in (self.base, self.base.name):
base = _CID_validate_multibase(base)
else:
base = self.base
if codec is not None and codec not in (self.codec, self.codec.name, self.codec.code):
codec = _CID_validate_multicodec(codec)
else:
codec = self.codec
if version is not None and version != self.version:
_CID_validate_version(version, base, codec, hashfun)
else:
version = self.version
return CID._new_instance(CID, base, version, codec, hashfun, digest)
def __bytes__(self) -> bytes:
if self.version == 0:
return self.digest
return varint.encode(self.version)+varint.encode(self.codec.code)+self.digest
def __str__(self) -> str:
return self.encode()
def __repr__(self) -> str:
mb = self.base.name
v = self.version
mc = self.codec.name
d = self.digest
return f"CID({repr(mb)}, {v}, {repr(mc)}, {repr(d.hex())})"
@property
def _as_tuple(self) -> Tuple[Type["CID"], int, Multicodec, bytes]:
return (CID, self.version, self.codec, self.digest)
def __hash__(self) -> int:
return hash(self._as_tuple)
def __eq__(self, other: Any) -> bool:
if self is other:
return True
if not isinstance(other, CID):
return NotImplemented
return self._as_tuple == other._as_tuple
@staticmethod
def decode(cid: Union[str, BytesLike]) -> "CID":
"""
Decodes a CID from a bytestring or a hex string (which will be converted to :obj:`bytes`
using :obj:`bytes.fromhex`). Note: the hex string is not multibase encoded.
Example usage for CIDv1 multibase-encoded string:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> CID.decode(s)
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
Example usage for CIDv1 bytestring (multibase always set to 'base58btc'):
>>> b = bytes.fromhex(
... "015512206e6ff7950a36187a801613426e85"
... "8dce686cd7d7e3c0fc42ee0330072d245c95")
>>> CID.decode(b)
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
Example usage for CIDv0 base58-encoded string:
>>> s = "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR"
>>> CID.decode(s)
CID('base58btc', 0, 'dag-pb',
'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a')
Example usage for CIDv0 bytestring (multibase always set to 'base58btc'):
>>> b = bytes.fromhex(
... "1220c3c4733ec8affd06cf9e9ff50ffc6b"
... "cd2ec85a6170004bb709669c31de94391a")
>>> CID.decode(b)
CID('base58btc', 0, 'dag-pb',
'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a')
:param cid: the CID bytes or multibase-encoded string
:type cid: :obj:`str` or :obj:`~multiformats.varint.BytesLike`
:raises ValueError: if the CID is malformed or the CID version is unsupported
:raises KeyError: if the multibase, multicodec or multihash are unknown
"""
if isinstance(cid, str):
cid, mb = _binary_cid_from_str(cid)
else:
mb = multibase.get("base58btc")
validate(cid, BytesLike)
cid = memoryview(cid)
# if len(cid) == 34 and cid.startswith(b"\x12\x20"):
if len(cid) == 34 and cid[0] == 0x12 and cid[1] == 0x20:
v = 0 # CID version
mc_code = 0x70 # multicodec.get("dag-pb")
digest = cid # multihash digest is what's left
else:
v, _, cid = varint.decode_raw(cid) # CID version
if v == 0:
raise ValueError("CIDv0 is malformed.")
if v in (2, 3):
raise ValueError("CID versions 2 and 3 are reserved for future use.")
if v != 1:
raise ValueError(f"CIDv{v} is currently not supported.")
mc_code, _, cid = multicodec.unwrap_raw(cid) # multicodec
digest = cid # multihash digest is what's left
mc = multicodec.get(code=mc_code)
mh_code, _ = multihash.unwrap_raw(digest)
mh = multihash.get(code=mh_code)
return CID._new_instance(CID, mb, v, mc, mh, digest)
@staticmethod
def peer_id(pk_bytes: Union[str, BytesLike]) -> "CID":
"""
Wraps the raw hash of a public key into a `PeerID <https://docs.libp2p.io/concepts/peer-id/>`_, as a CIDv1.
The ``pk_bytes`` argument should be the binary public key, encoded according to the
`PeerID spec <https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md>`_.
This can be passed as a bytestring or as a hex string (which will be converted to :obj:`bytes` using :obj:`bytes.fromhex`).
Note: the hex string is not multibase encoded.
Example usage with Ed25519 public key:
>>> pk_bytes = bytes.fromhex(
... "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93")
... # a 32-byte Ed25519 public key
>>> peer_id = CID.peer_id(pk_bytes)
>>> peer_id
CID('base32', 1, 'libp2p-key',
'00201498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93')
#^^ 0x00 = 'identity' multihash used (public key length <= 42)
# ^^ 0x20 = 32-bytes of raw hash digest length
>>> str(peer_id)
'bafzaaiautc2um6td375c3soz4bu4v4dv2fx4gp65jq5qdp5nvzsdg5t5sm'
Snippet showing how to obtain the `Ed25519 <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ed25519/>`_
public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:
>>> from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
>>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
>>> private_key = Ed25519PrivateKey.generate()
>>> public_key = private_key.public_key()
>>> pk_bytes = public_key.public_bytes(
... encoding=Encoding.Raw,
... format=PublicFormat.Raw
... )
>>> pk_bytes.hex()
"1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93"
Example usage with DER-encoded RSA public key:
>>> pk_bytes = bytes.fromhex(
... "30820122300d06092a864886f70d01010105000382010f003082010a02820101"
... "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"
... "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"
... "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"
... "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"
... "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"
... "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"
... "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"
... "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"
... "370203010001")
... # a 294-byte RSA public key
>>> peer_id = CID.peer_id(pk_bytes)
>>> peer_id
CID('base32', 1, 'libp2p-key',
'1220c1a6513ffb14f202f75453c49666a5b9d7ed9a1a068891daf824d477573f829f')
#^^ 0x12 = 'sha2-256' multihash used (public key length > 42)
# ^^ 0x20 = 32-bytes of raw hash digest length
>>> str(peer_id)
'bafzbeigbuzit76yu6ibpovctyslgnjnz27wzugqgrci5v6be2r3vop4ct4'
Snippet showing how to obtain the `RSA <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/>`_
public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:
>>> from cryptography.hazmat.primitives.asymmetric import rsa
>>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
>>> private_key = rsa.generate_private_key(
... public_exponent=65537,
... key_size=2048,
... )
>>> public_key = private_key.public_key()
>>> pk_bytes = public_key.public_bytes(
... encoding=Encoding.DER,
... format=PublicFormat.SubjectPublicKeyInfo
... )
>>> pk_bytes.hex()
"30820122300d06092a864886f70d01010105000382010f003082010a02820101"
"009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"
"5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"
"b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"
"591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"
"26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"
"2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"
"87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"
"6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"
"370203010001"
:param pk_bytes: the public key bytes
:type pk_bytes: :obj:`str` or :obj:`~multiformats.varint.BytesLike`
:raises ValueError: if ``pk_bytes`` is passed as a string and is not the hex-string of some bytes
"""
if isinstance(pk_bytes, str):
pk_bytes = bytes.fromhex(pk_bytes)
else:
validate(pk_bytes, BytesLike)
if len(pk_bytes) <= 42:
mh = multihash.get("identity")
digest = multihash.digest(pk_bytes, mh)
else:
mh = multihash.get("sha2-256")
digest = multihash.digest(pk_bytes, mh)
mc = multicodec.get(code=0x72) # multicodec.get("libp2p-key")
mb = multibase.get("base32")
return CID._new_instance(CID, mb, 1, mc, mh, digest)
|
def encode(self, base: Union[None, str, Multibase] = None) -> str:
"""
Encodes the CID using a given multibase. If :obj:`None` is given,
the CID's own multibase is used by default.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.encode() # default: cid.base
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
>>> cid.encode("base32")
'bafkreidon73zkcrwdb5iafqtijxildoonbwnpv7dyd6ef3qdgads2jc4su'
:param base: the multibase to be used for encoding
:type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*
:raises KeyError: see :meth:`multiformats.multibase.Multibase.encode`
"""
if self.version == 0:
if base is not None:
raise ValueError("CIDv0 cannot be multibase-encoded, please set multibase=None.")
return base58btc.encode(bytes(self))
if base is None or base == self.base:
base = self.base # use CID's own multibase as default
else:
if isinstance(base, str):
base = multibase.get(base)
else:
multibase.validate_multibase(base)
return base.encode(bytes(self))
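# Usage sketch (not shipped with the implementation above): encode() on a CIDv0.
# A CIDv0 always renders as bare base58btc and rejects an explicit multibase,
# per the `version == 0` guard at the top of encode(). The CIDv0 string is the
# example from CID.decode's docstring.
#
#     cid_v0 = CID.decode("QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR")
#     cid_v0.encode()          # -> 'QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR'
#     cid_v0.encode("base32")  # -> ValueError: CIDv0 cannot be multibase-encoded ...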
| 346 | 377 |
"""
Implementation of the `CID spec <https://github.com/multiformats/cid>`_.
This module differs from other modules of :mod:`~multiformats`, in that the functionality is completely
encapsulated by a single class :class:`CID`, which is imported from top level instead
of the module itself:
>>> from multiformats import CID
"""
from typing import Any, cast, FrozenSet, Tuple, Type, TypeVar, Union
from typing_extensions import Literal, Final
from typing_validation import validate
from bases import base58btc
from multiformats import varint, multicodec, multibase, multihash
from multiformats.multicodec import Multicodec
from multiformats.multibase import Multibase
from multiformats.multihash import Multihash, _validate_raw_digest_size
from multiformats.varint import BytesLike, byteslike
_CIDSubclass = TypeVar("_CIDSubclass", bound="CID")
CIDVersion = Literal[0, 1]
CIDVersionNumbers: Final[FrozenSet[int]] = frozenset({0, 1})
def _binary_cid_from_str(cid: str) -> Tuple[bytes, Multibase]:
if len(cid) == 46 and cid.startswith("Qm"):
# CIDv0 to be decoded as base58btc
return base58btc.decode(cid), multibase.get("base58btc")
mb, b = multibase.decode_raw(cid)
if b[0] == 0x12:
# CIDv0 may not be multibase encoded (0x12 is the first byte of sha2-256 multihashes)
# CIDv18 (first byte 18=0x12) will be skipped to prevent ambiguity
raise ValueError("CIDv0 may not be multibase encoded (found multibase encoded bytes starting with 0x12).")
return b, mb
def _CID_validate_multibase(base: Union[str, Multibase]) -> Multibase:
if isinstance(base, str):
base = multibase.get(base)
else:
multibase.validate_multibase(base)
return base
def _CID_validate_multicodec(codec: Union[str, int, Multicodec]) -> Multicodec:
if isinstance(codec, str):
codec = multicodec.get(codec)
elif isinstance(codec, int):
codec = multicodec.get(code=codec)
else:
multicodec.validate_multicodec(codec)
return codec
def _CID_validate_multihash(hashfun: Union[str, int, Multihash]) -> Multihash:
if isinstance(hashfun, str):
hashfun = multihash.get(hashfun)
elif isinstance(hashfun, int):
hashfun = multihash.get(code=hashfun)
else:
pass
return hashfun
def _CID_validate_raw_digest(raw_digest: Union[str, BytesLike], hashfun: Multihash) -> bytes:
if isinstance(raw_digest, str):
raw_digest = bytes.fromhex(raw_digest)
else:
validate(raw_digest, BytesLike)
if not isinstance(raw_digest, bytes):
raw_digest = bytes(raw_digest)
_, max_digest_size = hashfun.implementation
_validate_raw_digest_size(hashfun.name, raw_digest, max_digest_size)
return raw_digest
def _CID_validate_multihash_digest(digest: Union[str, BytesLike]) -> Tuple[Multihash, bytes]:
if isinstance(digest, str):
digest = bytes.fromhex(digest)
raw_digest: BytesLike
code, raw_digest = multihash.unwrap_raw(digest)
hashfun = _CID_validate_multihash(code)
raw_digest = _CID_validate_raw_digest(raw_digest, hashfun)
return hashfun, raw_digest
def _CID_validate_version(version: int, base: Multibase, codec: Multicodec, hashfun: Multihash) -> int:
if version in (2, 3):
raise ValueError("CID versions 2 and 3 are reserved for future use.")
if version not in (0, 1):
raise ValueError(f"CID version {version} is not allowed.")
if version == 0:
if base.name != 'base58btc':
raise ValueError(f"CIDv0 multibase must be 'base58btc', found {repr(base.name)} instead.")
if codec.name != "dag-pb":
raise ValueError(f"CIDv0 multicodec must be 'dag-pb', found {repr(codec.name)} instead.")
if hashfun.name != "sha2-256":
raise ValueError(f"CIDv0 multihash must be 'sha2-256', found {repr(hashfun.name)} instead.")
return version
class CID:
"""
Container class for `Content IDentifiers <https://github.com/multiformats/cid>`_.
CIDs can be explicitly instantiated by passing multibase, CID version, multicodec and multihash digest to the constructor:
>>> cid = CID("base58btc", 1, "raw",
... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> str(cid)
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
Alternatively, a pair of multihash codec and raw hash digest can be passed in lieu of the multihash digest:
>>> raw_digest = bytes.fromhex(
... "6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> cid = CID("base58btc", 1, "raw", ("sha2-256", raw_digest))
>>> str(cid)
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
The multihash digest and raw digest values can be passed either as :obj:`bytes`-like objects or as the corresponding hex strings:
>>> isinstance(raw_digest, bytes)
True
>>> raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
Note: the hex strings are not multibase encoded.
Calling :obj:`bytes` on an instance of this class returns its binary representation, as a :obj:`bytes` object:
>>> cid = CID("base58btc", 1, "raw",
... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
>>> bytes(cid).hex()
'015512206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
#^^ 0x01 = CIDv1
# ^^ 0x55 = 'raw' codec
>>> bytes(cid)
:param base: default multibase to use when encoding this CID
:type base: :obj:`str` or :class:`~multiformats.multibase.Multibase`
:param version: the CID version
:type version: 0 or 1
:param codec: the content multicodec
:type codec: :obj:`str`, :obj:`int` or :class:`~multiformats.multicodec.Multicodec`
:param digest: the content multihash digest, or a pair of multihash codec and raw content digest
:type digest: see below
The ``digest`` parameter can be specified in the following ways:
- as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex`
- as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly
- as a pair ``(multihash_codec, raw_digest)`` of a multihash and raw hash digest, which are used to produce a multihash digest
via the :meth:`~multiformats.multihash.Multihash.wrap` method
If ``digest`` is specified by a pair, the ``multihash_codec`` value can be specified in the following ways:
- by multihash multicodec name, as a :obj:`str`
- by multihash multicodec code, as an :obj:`int`
- as a :class:`~multiformats.multihash.Multihash` object
If ``digest`` is specified by a pair, the ``raw_digest`` value can be specified in the following ways:
- as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex`
- as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly
:raises ValueError: if the CID version is unsupported
:raises ValueError: if version is 0 but base is not 'base58btc' or codec is not 'dag-pb'
:raises KeyError: if the multibase, multicodec or multihash are unknown
"""
_base: Multibase
_version: CIDVersion
_codec: Multicodec
_hashfun: Multihash
_digest: bytes
__slots__ = ("__weakref__", "_base", "_version", "_codec", "_hashfun", "_digest")
def __new__(cls: Type[_CIDSubclass],
base: Union[str, Multibase],
version: int,
codec: Union[str, int, Multicodec],
digest: Union[str, BytesLike, Tuple[Union[str, int, Multihash], Union[str, BytesLike]]],
) -> _CIDSubclass:
# pylint: disable = too-many-arguments
base = _CID_validate_multibase(base)
codec = _CID_validate_multicodec(codec)
raw_digest: Union[str, bytes]
hashfun: Union[str, int, Multihash]
if isinstance(digest, (str,)+byteslike):
hashfun, raw_digest = _CID_validate_multihash_digest(digest)
else:
validate(digest, Tuple[Union[str, int, Multihash], Union[str, BytesLike]])
hashfun, raw_digest = digest
hashfun = _CID_validate_multihash(hashfun)
raw_digest = _CID_validate_raw_digest(raw_digest, hashfun)
version = _CID_validate_version(version, base, codec, hashfun)
if isinstance(digest, bytes):
return CID._new_instance(cls, base, version, codec, hashfun, digest)
return CID._new_instance(cls, base, version, codec, hashfun, (hashfun, raw_digest))
@staticmethod
def _new_instance(CID_subclass: Type[_CIDSubclass],
base: Multibase,
version: int,
codec: Multicodec,
hashfun: Multihash,
digest: Union[bytes, Tuple[Multihash, bytes]],
) -> _CIDSubclass:
# pylint: disable = too-many-arguments
instance: _CIDSubclass = super().__new__(CID_subclass)
instance._base = base
assert version in (0, 1)
instance._version = cast(Literal[0, 1], version)
instance._codec = codec
instance._hashfun = hashfun
if isinstance(digest, bytes):
instance._digest = digest
elif isinstance(digest, byteslike):
instance._digest = bytes(digest)
else:
_hashfun, raw_digest = digest
if not isinstance(raw_digest, bytes):
raw_digest = bytes(raw_digest)
assert _hashfun == hashfun, "You passed different multihashes to a _new_instance call with digest as a pair."
instance._digest = hashfun.wrap(raw_digest)
return instance
@property
def version(self) -> CIDVersion:
"""
CID version.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.version
1
"""
return self._version
@property
def base(self) -> Multibase:
"""
Multibase used to encode the CID:
- if a CIDv1 was decoded from a multibase-encoded string, the encoding multibase is used
- if a CIDv1 was decoded from a bytestring, the 'base58btc' multibase is used
- for a CIDv0, 'base58btc' is always used
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.base
Multibase(name='base58btc', code='z',
status='default', description='base58 bitcoin')
"""
return self._base
@property
def codec(self) -> Multicodec:
"""
Codec that the multihash digest refers to.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.codec
Multicodec(name='raw', tag='ipld', code='0x55',
status='permanent', description='raw binary')
"""
return self._codec
@property
def hashfun(self) -> Multihash:
"""
Multihash used to produce the multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.hashfun
Multicodec(name='sha2-256', tag='multihash', code='0x12',
status='permanent', description='')
"""
return self._hashfun
@property
def digest(self) -> bytes:
"""
Multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.digest.hex()
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
"""
return self._digest
@property
def raw_digest(self) -> bytes:
"""
Raw hash digest, decoded from the multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
"""
return multihash.unwrap(self._digest)
@property
def human_readable(self) -> str:
"""
Human-readable representation of the CID.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.human_readable
'base58btc - cidv1 - raw - (sha2-256 : 256 : 6E6FF7950A36187A801613426E858DCE686CD7D7E3C0FC42EE0330072D245C95)'
"""
raw_digest = self.raw_digest
hashfun_str = f"({self.hashfun.name} : {len(raw_digest)*8} : {raw_digest.hex().upper()})"
return f"{self.base.name} - cidv{self.version} - {self.codec.name} - {hashfun_str}"
def encode(self, base: Union[None, str, Multibase] = None) -> str:
"""
Encodes the CID using a given multibase. If :obj:`None` is given,
the CID's own multibase is used by default.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.encode() # default: cid.base
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
>>> cid.encode("base32")
'bafkreidon73zkcrwdb5iafqtijxildoonbwnpv7dyd6ef3qdgads2jc4su'
:param base: the multibase to be used for encoding
:type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*
:raises KeyError: see :meth:`multiformats.multibase.Multibase.encode`
"""
if self.version == 0:
if base is not None:
raise ValueError("CIDv0 cannot be multibase-encoded, please set multibase=None.")
return base58btc.encode(bytes(self))
if base is None or base == self.base:
base = self.base # use CID's own multibase as default
else:
if isinstance(base, str):
base = multibase.get(base)
else:
multibase.validate_multibase(base)
return base.encode(bytes(self))
def set(self, *,
base: Union[None, str, Multibase] = None,
version: Union[None, int] = None,
codec: Union[None, str, int, Multicodec] = None
) -> "CID":
"""
Returns a new CID obtained by setting new values for one or more of:
``base``, ``version``, or ``codec``.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(base="base32")
CID('base32', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(codec="dag-cbor")
CID('base58btc', 1, 'dag-cbor',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0, codec="dag-pb")
CID('base58btc', 0, 'dag-pb',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
# Note: 'CID.set' returns new instances,
# the original 'cid' instance is unchanged
If setting ``version`` to 0, ``base`` must be 'base58btc' and ``codec`` must be 'dag-pb'.
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0, codec="dag-pb")
CID('base58btc', 0, 'dag-pb',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0)
ValueError: CIDv0 multicodec must be 'dag-pb', found 'raw' instead.
>>> cid.set(version=0, codec="dag-pb", base="base32")
ValueError: CIDv0 multibase must be 'base58btc', found 'base32' instead.
:param base: the new CID multibase, or :obj:`None` if multibase unchanged
:type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*
:param version: the new CID version, or :obj:`None` if version unchanged
:type version: :obj:`None`, 0 or 1, *optional*
:param codec: the new content multicodec, or :obj:`None` if multicodec unchanged
:type codec: :obj:`None`, :obj:`str` or :class:`~multiformats.multicodec.Multicodec`, *optional*
:raises KeyError: if the multibase or multicodec are unknown
"""
hashfun = self.hashfun
digest = self.digest
if base is not None and base not in (self.base, self.base.name):
base = _CID_validate_multibase(base)
else:
base = self.base
if codec is not None and codec not in (self.codec, self.codec.name, self.codec.code):
codec = _CID_validate_multicodec(codec)
else:
codec = self.codec
if version is not None and version != self.version:
_CID_validate_version(version, base, codec, hashfun)
else:
version = self.version
return CID._new_instance(CID, base, version, codec, hashfun, digest)
def __bytes__(self) -> bytes:
if self.version == 0:
return self.digest
return varint.encode(self.version)+varint.encode(self.codec.code)+self.digest
def __str__(self) -> str:
return self.encode()
def __repr__(self) -> str:
mb = self.base.name
v = self.version
mc = self.codec.name
d = self.digest
return f"CID({repr(mb)}, {v}, {repr(mc)}, {repr(d.hex())})"
@property
def _as_tuple(self) -> Tuple[Type["CID"], int, Multicodec, bytes]:
return (CID, self.version, self.codec, self.digest)
def __hash__(self) -> int:
return hash(self._as_tuple)
def __eq__(self, other: Any) -> bool:
if self is other:
return True
if not isinstance(other, CID):
return NotImplemented
return self._as_tuple == other._as_tuple
@staticmethod
def decode(cid: Union[str, BytesLike]) -> "CID":
"""
Decodes a CID from a bytestring or a hex string (which will be converted to :obj:`bytes`
using :obj:`bytes.fromhex`). Note: the hex string is not multibase encoded.
Example usage for CIDv1 multibase-encoded string:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> CID.decode(s)
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
Example usage for CIDv1 bytestring (multibase always set to 'base58btc'):
>>> b = bytes.fromhex(
... "015512206e6ff7950a36187a801613426e85"
... "8dce686cd7d7e3c0fc42ee0330072d245c95")
>>> CID.decode(b)
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
Example usage for CIDv0 base58-encoded string:
>>> s = "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR"
>>> CID.decode(s)
CID('base58btc', 0, 'dag-pb',
'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a')
Example usage for CIDv0 bytestring (multibase always set to 'base58btc'):
>>> b = bytes.fromhex(
... "1220c3c4733ec8affd06cf9e9ff50ffc6b"
... "cd2ec85a6170004bb709669c31de94391a")
>>> CID.decode(b)
CID('base58btc', 0, 'dag-pb',
'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a')
:param cid: the CID bytes or multibase-encoded string
:type cid: :obj:`str` or :obj:`~multiformats.varint.BytesLike`
:raises ValueError: if the CID is malformed or the CID version is unsupported
:raises KeyError: if the multibase, multicodec or multihash are unknown
"""
if isinstance(cid, str):
cid, mb = _binary_cid_from_str(cid)
else:
mb = multibase.get("base58btc")
validate(cid, BytesLike)
cid = memoryview(cid)
# if len(cid) == 34 and cid.startswith(b"\x12\x20"):
if len(cid) == 34 and cid[0] == 0x12 and cid[1] == 0x20:
v = 0 # CID version
mc_code = 0x70 # multicodec.get("dag-pb")
digest = cid # multihash digest is what's left
else:
v, _, cid = varint.decode_raw(cid) # CID version
if v == 0:
raise ValueError("CIDv0 is malformed.")
if v in (2, 3):
raise ValueError("CID versions 2 and 3 are reserved for future use.")
if v != 1:
raise ValueError(f"CIDv{v} is currently not supported.")
mc_code, _, cid = multicodec.unwrap_raw(cid) # multicodec
digest = cid # multihash digest is what's left
mc = multicodec.get(code=mc_code)
mh_code, _ = multihash.unwrap_raw(digest)
mh = multihash.get(code=mh_code)
return CID._new_instance(CID, mb, v, mc, mh, digest)
@staticmethod
def peer_id(pk_bytes: Union[str, BytesLike]) -> "CID":
"""
Wraps the raw hash of a public key into a `PeerID <https://docs.libp2p.io/concepts/peer-id/>`_, as a CIDv1.
The ``pk_bytes`` argument should be the binary public key, encoded according to the
`PeerID spec <https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md>`_.
This can be passed as a bytestring or as a hex string (which will be converted to :obj:`bytes` using :obj:`bytes.fromhex`).
Note: the hex string is not multibase encoded.
Example usage with Ed25519 public key:
>>> pk_bytes = bytes.fromhex(
... "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93")
... # a 32-byte Ed25519 public key
>>> peer_id = CID.peer_id(pk_bytes)
>>> peer_id
CID('base32', 1, 'libp2p-key',
'00201498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93')
#^^ 0x00 = 'identity' multihash used (public key length <= 42)
# ^^ 0x20 = 32-bytes of raw hash digest length
>>> str(peer_id)
'bafzaaiautc2um6td375c3soz4bu4v4dv2fx4gp65jq5qdp5nvzsdg5t5sm'
Snippet showing how to obtain the `Ed25519 <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ed25519/>`_
public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:
>>> from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
>>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
>>> private_key = Ed25519PrivateKey.generate()
>>> public_key = private_key.public_key()
>>> pk_bytes = public_key.public_bytes(
... encoding=Encoding.Raw,
... format=PublicFormat.Raw
... )
>>> pk_bytes.hex()
"1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93"
Example usage with DER-encoded RSA public key:
>>> pk_bytes = bytes.fromhex(
... "30820122300d06092a864886f70d01010105000382010f003082010a02820101"
... "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"
... "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"
... "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"
... "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"
... "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"
... "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"
... "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"
... "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"
... "370203010001")
... # a 294-byte RSA public key
>>> peer_id = CID.peer_id(pk_bytes)
>>> peer_id
CID('base32', 1, 'libp2p-key',
'1220c1a6513ffb14f202f75453c49666a5b9d7ed9a1a068891daf824d477573f829f')
#^^ 0x12 = 'sha2-256' multihash used (public key length > 42)
# ^^ 0x20 = 32-bytes of raw hash digest length
>>> str(peer_id)
'bafzbeigbuzit76yu6ibpovctyslgnjnz27wzugqgrci5v6be2r3vop4ct4'
Snippet showing how to obtain the `RSA <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/>`_
public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:
>>> from cryptography.hazmat.primitives.asymmetric import rsa
>>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
>>> private_key = rsa.generate_private_key(
... public_exponent=65537,
... key_size=2048,
... )
>>> public_key = private_key.public_key()
>>> pk_bytes = public_key.public_bytes(
... encoding=Encoding.DER,
... format=PublicFormat.SubjectPublicKeyInfo
... )
>>> pk_bytes.hex()
"30820122300d06092a864886f70d01010105000382010f003082010a02820101"
"009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"
"5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"
"b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"
"591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"
"26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"
"2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"
"87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"
"6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"
"370203010001"
:param pk_bytes: the public key bytes
:type pk_bytes: :obj:`str` or :obj:`~multiformats.varint.BytesLike`
:raises ValueError: if ``pk_bytes`` is passed as a string and is not the hex-string of some bytes
"""
if isinstance(pk_bytes, str):
pk_bytes = bytes.fromhex(pk_bytes)
else:
validate(pk_bytes, BytesLike)
if len(pk_bytes) <= 42:
mh = multihash.get("identity")
digest = multihash.digest(pk_bytes, mh)
else:
mh = multihash.get("sha2-256")
digest = multihash.digest(pk_bytes, mh)
mc = multicodec.get(code=0x72) # multicodec.get("libp2p-key")
mb = multibase.get("base32")
return CID._new_instance(CID, mb, 1, mc, mh, digest)
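# Hedged illustration (added for this write-up): PeerID derivation as implemented by
# CID.peer_id above — small keys (<= 42 bytes) use the 'identity' multihash, larger
# keys use 'sha2-256', and the result is a base32 CIDv1 with the 'libp2p-key' codec.
# A minimal sketch, assuming the multiformats package is installed; the key bytes and
# expected string come from the Ed25519 doctest above.
def _example_peer_id() -> None:
    from multiformats import CID
    pk_bytes = bytes.fromhex(
        "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93")
    peer_id = CID.peer_id(pk_bytes)
    assert peer_id.codec.name == "libp2p-key"
    assert str(peer_id) == "bafzaaiautc2um6td375c3soz4bu4v4dv2fx4gp65jq5qdp5nvzsdg5t5sm"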
|
set
|
Returns a new CID obtained by setting new values for one or more of:
``base``, ``version``, or ``codec``.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(base="base32")
CID('base32', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(codec="dag-cbor")
CID('base58btc', 1, 'dag-cbor',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0, codec="dag-pb")
CID('base58btc', 0, 'dag-pb',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
# Note: 'CID.set' returns new instances,
# the original 'cid' instance is unchanged
If setting ``version`` to 0, ``base`` must be 'base58btc' and ``codec`` must be 'dag-pb'.
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0, codec="dag-pb")
CID('base58btc', 0, 'dag-pb',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0)
ValueError: CIDv0 multicodec must be 'dag-pb', found 'raw' instead.
>>> cid.set(version=0, codec="dag-pb", base="base32")
ValueError: CIDv0 multibase must be 'base58btc', found 'base32' instead.
:param base: the new CID multibase, or :obj:`None` if multibase unchanged
:type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*
:param version: the new CID version, or :obj:`None` if version unchanged
:type version: :obj:`None`, 0 or 1, *optional*
:param codec: the new content multicodec, or :obj:`None` if multicodec unchanged
:type codec: :obj:`None`, :obj:`str` or :class:`~multiformats.multicodec.Multicodec`, *optional*
:raises KeyError: if the multibase or multicodec are unknown
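# Hedged illustration (added for this write-up): a minimal usage sketch of CID.set,
# based on the doctests above; assumes the multiformats package is installed.
def _example_cid_set() -> None:
    from multiformats import CID
    cid = CID.decode("zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA")
    in_base32 = cid.set(base="base32")          # same digest, different default multibase
    assert str(in_base32).startswith("b")       # 'b' is the base32 multibase code
    assert cid.base.name == "base58btc"         # the original CID is unchanged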
|
"""
Implementation of the `CID spec <https://github.com/multiformats/cid>`_.
This module differs from other modules of :mod:`~multiformats`, in that the functionality is completely
encapsulated by a single class :class:`CID`, which is imported from top level instead
of the module itself:
>>> from multiformats import CID
"""
from typing import Any, cast, FrozenSet, Tuple, Type, TypeVar, Union
from typing_extensions import Literal, Final
from typing_validation import validate
from bases import base58btc
from multiformats import varint, multicodec, multibase, multihash
from multiformats.multicodec import Multicodec
from multiformats.multibase import Multibase
from multiformats.multihash import Multihash, _validate_raw_digest_size
from multiformats.varint import BytesLike, byteslike
_CIDSubclass = TypeVar("_CIDSubclass", bound="CID")
CIDVersion = Literal[0, 1]
CIDVersionNumbers: Final[FrozenSet[int]] = frozenset({0, 1})
def _binary_cid_from_str(cid: str) -> Tuple[bytes, Multibase]:
if len(cid) == 46 and cid.startswith("Qm"):
# CIDv0 to be decoded as base58btc
return base58btc.decode(cid), multibase.get("base58btc")
mb, b = multibase.decode_raw(cid)
if b[0] == 0x12:
# CIDv0 may not be multibase encoded (0x12 is the first byte of sha2-256 multihashes)
# CIDv18 (first byte 18=0x12) will be skipped to prevent ambiguity
raise ValueError("CIDv0 may not be multibase encoded (found multibase encoded bytes starting with 0x12).")
return b, mb
def _CID_validate_multibase(base: Union[str, Multibase]) -> Multibase:
if isinstance(base, str):
base = multibase.get(base)
else:
multibase.validate_multibase(base)
return base
def _CID_validate_multicodec(codec: Union[str, int, Multicodec]) -> Multicodec:
if isinstance(codec, str):
codec = multicodec.get(codec)
elif isinstance(codec, int):
codec = multicodec.get(code=codec)
else:
multicodec.validate_multicodec(codec)
return codec
def _CID_validate_multihash(hashfun: Union[str, int, Multihash]) -> Multihash:
if isinstance(hashfun, str):
hashfun = multihash.get(hashfun)
elif isinstance(hashfun, int):
hashfun = multihash.get(code=hashfun)
else:
pass
return hashfun
def _CID_validate_raw_digest(raw_digest: Union[str, BytesLike], hashfun: Multihash) -> bytes:
if isinstance(raw_digest, str):
raw_digest = bytes.fromhex(raw_digest)
else:
validate(raw_digest, BytesLike)
if not isinstance(raw_digest, bytes):
raw_digest = bytes(raw_digest)
_, max_digest_size = hashfun.implementation
_validate_raw_digest_size(hashfun.name, raw_digest, max_digest_size)
return raw_digest
def _CID_validate_multihash_digest(digest: Union[str, BytesLike]) -> Tuple[Multihash, bytes]:
if isinstance(digest, str):
digest = bytes.fromhex(digest)
raw_digest: BytesLike
code, raw_digest = multihash.unwrap_raw(digest)
hashfun = _CID_validate_multihash(code)
raw_digest = _CID_validate_raw_digest(raw_digest, hashfun)
return hashfun, raw_digest
def _CID_validate_version(version: int, base: Multibase, codec: Multicodec, hashfun: Multihash) -> int:
if version in (2, 3):
raise ValueError("CID versions 2 and 3 are reserved for future use.")
if version not in (0, 1):
raise ValueError(f"CID version {version} is not allowed.")
if version == 0:
if base.name != 'base58btc':
raise ValueError(f"CIDv0 multibase must be 'base58btc', found {repr(base.name)} instead.")
if codec.name != "dag-pb":
raise ValueError(f"CIDv0 multicodec must be 'dag-pb', found {repr(codec.name)} instead.")
if hashfun.name != "sha2-256":
raise ValueError(f"CIDv0 multihash must be 'sha2-256', found {repr(hashfun.name)} instead.")
return version
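# Hedged illustration (added for this write-up): the CIDv1 binary layout produced by
# CID.__bytes__ further below — varint(version) + varint(codec code) + multihash digest.
# A minimal sketch, assuming the multiformats package is installed; the expected hex
# string matches the class docstring below.
def _example_cidv1_binary_layout() -> None:
    from multiformats import varint
    digest = bytes.fromhex(
        "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
    raw_cid = varint.encode(1) + varint.encode(0x55) + digest   # CIDv1, 'raw' codec
    assert raw_cid.hex() == (
        "015512206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")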
class CID:
"""
Container class for `Content IDentifiers <https://github.com/multiformats/cid>`_.
CIDs can be explicitly instantiated by passing multibase, CID version, multicodec and multihash digest to the constructor:
>>> cid = CID("base58btc", 1, "raw",
... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> str(cid)
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
Alternatively, a pair of multihash codec and raw hash digest can be passed in lieu of the multihash digest:
>>> raw_digest = bytes.fromhex(
... "6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> cid = CID("base58btc", 1, "raw", ("sha2-256", raw_digest))
>>> str(cid)
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
The multihash digest and raw digest values can be passed either as :obj:`bytes`-like objects or as the corresponding hex strings:
>>> isinstance(raw_digest, bytes)
True
>>> raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
Note: the hex strings are not multibase encoded.
Calling :obj:`bytes` on an instance of this class returns its binary representation, as a :obj:`bytes` object:
>>> cid = CID("base58btc", 1, "raw",
... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
>>> bytes(cid).hex()
'015512206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
#^^ 0x01 = CIDv1
# ^^ 0x55 = 'raw' codec
>>> bytes(cid)
:param base: default multibase to use when encoding this CID
:type base: :obj:`str` or :class:`~multiformats.multibase.Multibase`
:param version: the CID version
:type version: 0 or 1
:param codec: the content multicodec
:type codec: :obj:`str`, :obj:`int` or :class:`~multiformats.multicodec.Multicodec`
:param digest: the content multihash digest, or a pair of multihash codec and raw content digest
:type digest: see below
The ``digest`` parameter can be specified in the following ways:
- as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex`
- as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly
- as a pair ``(multihash_codec, raw_digest)`` of a multihash and raw hash digest, which are used to produce a multihash digest
via the :meth:`~multiformats.multihash.Multihash.wrap` method
If ``digest`` is specified by a pair, the ``multihash_codec`` value can be specified in the following ways:
- by multihash multicodec name, as a :obj:`str`
- by multihash multicodec code, as an :obj:`int`
- as a :class:`~multiformats.multihash.Multihash` object
If ``digest`` is specified by a pair, the ``raw_digest`` value can be specified in the following ways:
- as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex`
- as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly
:raises ValueError: if the CID version is unsupported
:raises ValueError: if version is 0 but base is not 'base58btc' or codec is not 'dag-pb'
:raises KeyError: if the multibase, multicodec or multihash are unknown
"""
_base: Multibase
_version: CIDVersion
_codec: Multicodec
_hashfun: Multihash
_digest: bytes
__slots__ = ("__weakref__", "_base", "_version", "_codec", "_hashfun", "_digest")
def __new__(cls: Type[_CIDSubclass],
base: Union[str, Multibase],
version: int,
codec: Union[str, int, Multicodec],
digest: Union[str, BytesLike, Tuple[Union[str, int, Multihash], Union[str, BytesLike]]],
) -> _CIDSubclass:
# pylint: disable = too-many-arguments
base = _CID_validate_multibase(base)
codec = _CID_validate_multicodec(codec)
raw_digest: Union[str, bytes]
hashfun: Union[str, int, Multihash]
if isinstance(digest, (str,)+byteslike):
hashfun, raw_digest = _CID_validate_multihash_digest(digest)
else:
validate(digest, Tuple[Union[str, int, Multihash], Union[str, BytesLike]])
hashfun, raw_digest = digest
hashfun = _CID_validate_multihash(hashfun)
raw_digest = _CID_validate_raw_digest(raw_digest, hashfun)
version = _CID_validate_version(version, base, codec, hashfun)
if isinstance(digest, bytes):
return CID._new_instance(cls, base, version, codec, hashfun, digest)
return CID._new_instance(cls, base, version, codec, hashfun, (hashfun, raw_digest))
@staticmethod
def _new_instance(CID_subclass: Type[_CIDSubclass],
base: Multibase,
version: int,
codec: Multicodec,
hashfun: Multihash,
digest: Union[bytes, Tuple[Multihash, bytes]],
) -> _CIDSubclass:
# pylint: disable = too-many-arguments
instance: _CIDSubclass = super().__new__(CID_subclass)
instance._base = base
assert version in (0, 1)
instance._version = cast(Literal[0, 1], version)
instance._codec = codec
instance._hashfun = hashfun
if isinstance(digest, bytes):
instance._digest = digest
elif isinstance(digest, byteslike):
instance._digest = bytes(digest)
else:
_hashfun, raw_digest = digest
if not isinstance(raw_digest, bytes):
raw_digest = bytes(raw_digest)
assert _hashfun == hashfun, "You passed different multihashes to a _new_instance call with digest as a pair."
instance._digest = hashfun.wrap(raw_digest)
return instance
@property
def version(self) -> CIDVersion:
"""
CID version.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.version
1
"""
return self._version
@property
def base(self) -> Multibase:
"""
Multibase used to encode the CID:
- if a CIDv1 was decoded from a multibase-encoded string, the encoding multibase is used
- if a CIDv1 was decoded from a bytestring, the 'base58btc' multibase is used
- for a CIDv0, 'base58btc' is always used
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.base
Multibase(name='base58btc', code='z',
status='default', description='base58 bitcoin')
"""
return self._base
@property
def codec(self) -> Multicodec:
"""
Codec that the multihash digest refers to.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.codec
Multicodec(name='raw', tag='ipld', code='0x55',
status='permanent', description='raw binary')
"""
return self._codec
@property
def hashfun(self) -> Multihash:
"""
Multihash used to produce the multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.hashfun
Multicodec(name='sha2-256', tag='multihash', code='0x12',
status='permanent', description='')
"""
return self._hashfun
@property
def digest(self) -> bytes:
"""
Multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.digest.hex()
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
"""
return self._digest
@property
def raw_digest(self) -> bytes:
"""
Raw hash digest, decoded from the multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
"""
return multihash.unwrap(self._digest)
@property
def human_readable(self) -> str:
"""
Human-readable representation of the CID.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.human_readable
'base58btc - cidv1 - raw - (sha2-256 : 256 : 6E6FF7950A36187A801613426E858DCE686CD7D7E3C0FC42EE0330072D245C95)'
"""
raw_digest = self.raw_digest
hashfun_str = f"({self.hashfun.name} : {len(raw_digest)*8} : {raw_digest.hex().upper()})"
return f"{self.base.name} - cidv{self.version} - {self.codec.name} - {hashfun_str}"
def encode(self, base: Union[None, str, Multibase] = None) -> str:
"""
Encodes the CID using a given multibase. If :obj:`None` is given,
the CID's own multibase is used by default.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.encode() # default: cid.base
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
>>> cid.encode("base32")
'bafkreidon73zkcrwdb5iafqtijxildoonbwnpv7dyd6ef3qdgads2jc4su'
:param base: the multibase to be used for encoding
:type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*
:raises KeyError: see :meth:`multiformats.multibase.Multibase.encode`
"""
if self.version == 0:
if base is not None:
raise ValueError("CIDv0 cannot be multibase-encoded, please set multibase=None.")
return base58btc.encode(bytes(self))
if base is None or base == self.base:
base = self.base # use CID's own multibase as default
else:
if isinstance(base, str):
base = multibase.get(base)
else:
multibase.validate_multibase(base)
return base.encode(bytes(self))
# MASKED: set function (lines 379-449)
def __bytes__(self) -> bytes:
if self.version == 0:
return self.digest
return varint.encode(self.version)+varint.encode(self.codec.code)+self.digest
def __str__(self) -> str:
return self.encode()
def __repr__(self) -> str:
mb = self.base.name
v = self.version
mc = self.codec.name
d = self.digest
return f"CID({repr(mb)}, {v}, {repr(mc)}, {repr(d.hex())})"
@property
def _as_tuple(self) -> Tuple[Type["CID"], int, Multicodec, bytes]:
return (CID, self.version, self.codec, self.digest)
def __hash__(self) -> int:
return hash(self._as_tuple)
def __eq__(self, other: Any) -> bool:
if self is other:
return True
if not isinstance(other, CID):
return NotImplemented
return self._as_tuple == other._as_tuple
@staticmethod
def decode(cid: Union[str, BytesLike]) -> "CID":
"""
Decodes a CID from a bytestring or a hex string (which will be converted to :obj:`bytes`
using :obj:`bytes.fromhex`). Note: the hex string is not multibase encoded.
Example usage for CIDv1 multibase-encoded string:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> CID.decode(s)
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
Example usage for CIDv1 bytestring (multibase always set to 'base58btc'):
>>> b = bytes.fromhex(
... "015512206e6ff7950a36187a801613426e85"
... "8dce686cd7d7e3c0fc42ee0330072d245c95")
>>> CID.decode(b)
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
Example usage for CIDv0 base58-encoded string:
>>> s = "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR"
>>> CID.decode(s)
CID('base58btc', 0, 'dag-pb',
'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a')
Example usage for CIDv0 bytestring (multibase always set to 'base58btc'):
>>> b = bytes.fromhex(
... "1220c3c4733ec8affd06cf9e9ff50ffc6b"
... "cd2ec85a6170004bb709669c31de94391a")
>>> CID.decode(b)
CID('base58btc', 0, 'dag-pb',
'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a')
:param cid: the CID bytes or multibase-encoded string
:type cid: :obj:`str` or :obj:`~multiformats.varint.BytesLike`
:raises ValueError: if the CID is malformed or the CID version is unsupported
:raises KeyError: if the multibase, multicodec or multihash are unknown
"""
if isinstance(cid, str):
cid, mb = _binary_cid_from_str(cid)
else:
mb = multibase.get("base58btc")
validate(cid, BytesLike)
cid = memoryview(cid)
# if len(cid) == 34 and cid.startswith(b"\x12\x20"):
if len(cid) == 34 and cid[0] == 0x12 and cid[1] == 0x20:
v = 0 # CID version
mc_code = 0x70 # multicodec.get("dag-pb")
digest = cid # multihash digest is what's left
else:
v, _, cid = varint.decode_raw(cid) # CID version
if v == 0:
raise ValueError("CIDv0 is malformed.")
if v in (2, 3):
raise ValueError("CID versions 2 and 3 are reserved for future use.")
if v != 1:
raise ValueError(f"CIDv{v} is currently not supported.")
mc_code, _, cid = multicodec.unwrap_raw(cid) # multicodec
digest = cid # multihash digest is what's left
mc = multicodec.get(code=mc_code)
mh_code, _ = multihash.unwrap_raw(digest)
mh = multihash.get(code=mh_code)
return CID._new_instance(CID, mb, v, mc, mh, digest)
@staticmethod
def peer_id(pk_bytes: Union[str, BytesLike]) -> "CID":
"""
Wraps the raw hash of a public key into a `PeerID <https://docs.libp2p.io/concepts/peer-id/>`_, as a CIDv1.
The ``pk_bytes`` argument should be the binary public key, encoded according to the
`PeerID spec <https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md>`_.
This can be passed as a bytestring or as a hex string (which will be converted to :obj:`bytes` using :obj:`bytes.fromhex`).
Note: the hex string is not multibase encoded.
Example usage with Ed25519 public key:
>>> pk_bytes = bytes.fromhex(
... "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93")
... # a 32-byte Ed25519 public key
>>> peer_id = CID.peer_id(pk_bytes)
>>> peer_id
CID('base32', 1, 'libp2p-key',
'00201498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93')
#^^ 0x00 = 'identity' multihash used (public key length <= 42)
# ^^ 0x20 = 32-bytes of raw hash digest length
>>> str(peer_id)
'bafzaaiautc2um6td375c3soz4bu4v4dv2fx4gp65jq5qdp5nvzsdg5t5sm'
Snippet showing how to obtain the `Ed25519 <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ed25519/>`_
public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:
>>> from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
>>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
>>> private_key = Ed25519PrivateKey.generate()
>>> public_key = private_key.public_key()
>>> pk_bytes = public_key.public_bytes(
... encoding=Encoding.Raw,
... format=PublicFormat.Raw
... )
>>> pk_bytes.hex()
"1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93"
Example usage with DER-encoded RSA public key:
>>> pk_bytes = bytes.fromhex(
... "30820122300d06092a864886f70d01010105000382010f003082010a02820101"
... "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"
... "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"
... "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"
... "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"
... "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"
... "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"
... "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"
... "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"
... "370203010001")
... # a 294-byte RSA public key
>>> peer_id = CID.peer_id(pk_bytes)
>>> peer_id
CID('base32', 1, 'libp2p-key',
'1220c1a6513ffb14f202f75453c49666a5b9d7ed9a1a068891daf824d477573f829f')
#^^ 0x12 = 'sha2-256' multihash used (public key length > 42)
# ^^ 0x20 = 32-bytes of raw hash digest length
>>> str(peer_id)
'bafzbeigbuzit76yu6ibpovctyslgnjnz27wzugqgrci5v6be2r3vop4ct4'
Snippet showing how to obtain the `RSA <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/>`_
public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:
>>> from cryptography.hazmat.primitives.asymmetric import rsa
>>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
>>> private_key = rsa.generate_private_key(
... public_exponent=65537,
... key_size=2048,
... )
>>> public_key = private_key.public_key()
>>> pk_bytes = public_key.public_bytes(
... encoding=Encoding.DER,
... format=PublicFormat.SubjectPublicKeyInfo
... )
>>> pk_bytes.hex()
"30820122300d06092a864886f70d01010105000382010f003082010a02820101"
"009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"
"5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"
"b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"
"591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"
"26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"
"2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"
"87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"
"6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"
"370203010001"
:param pk_bytes: the public key bytes
:type pk_bytes: :obj:`str` or :obj:`~multiformats.varint.BytesLike`
:raises ValueError: if ``pk_bytes`` is passed as a string and is not the hex-string of some bytes
"""
if isinstance(pk_bytes, str):
pk_bytes = bytes.fromhex(pk_bytes)
else:
validate(pk_bytes, BytesLike)
if len(pk_bytes) <= 42:
mh = multihash.get("identity")
digest = multihash.digest(pk_bytes, mh)
else:
mh = multihash.get("sha2-256")
digest = multihash.digest(pk_bytes, mh)
mc = multicodec.get(code=0x72) # multicodec.get("libp2p-key")
mb = multibase.get("base32")
return CID._new_instance(CID, mb, 1, mc, mh, digest)
|
def set(self, *,
base: Union[None, str, Multibase] = None,
version: Union[None, int] = None,
codec: Union[None, str, int, Multicodec] = None
) -> "CID":
"""
Returns a new CID obtained by setting new values for one or more of:
``base``, ``version``, or ``codec``.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(base="base32")
CID('base32', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(codec="dag-cbor")
CID('base58btc', 1, 'dag-cbor',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0, codec="dag-pb")
CID('base58btc', 0, 'dag-pb',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
# Note: 'CID.set' returns new instances,
# the original 'cid' instance is unchanged
If setting ``version`` to 0, ``base`` must be 'base58btc' and ``codec`` must be 'dag-pb'.
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0, codec="dag-pb")
CID('base58btc', 0, 'dag-pb',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0)
ValueError: CIDv0 multicodec must be 'dag-pb', found 'raw' instead.
>>> cid.set(version=0, codec="dag-pb", base="base32")
ValueError: CIDv0 multibase must be 'base58btc', found 'base32' instead.
:param base: the new CID multibase, or :obj:`None` if multibase unchanged
:type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*
:param version: the new CID version, or :obj:`None` if version unchanged
:type version: :obj:`None`, 0 or 1, *optional*
:param codec: the new content multicodec, or :obj:`None` if multicodec unchanged
:type codec: :obj:`None`, :obj:`str` or :class:`~multiformats.multicodec.Multicodec`, *optional*
:raises KeyError: if the multibase or multicodec are unknown
"""
hashfun = self.hashfun
digest = self.digest
if base is not None and base not in (self.base, self.base.name):
base = _CID_validate_multibase(base)
else:
base = self.base
if codec is not None and codec not in (self.codec, self.codec.name, self.codec.code):
codec = _CID_validate_multicodec(codec)
else:
codec = self.codec
if version is not None and version != self.version:
_CID_validate_version(version, base, codec, hashfun)
else:
version = self.version
return CID._new_instance(CID, base, version, codec, hashfun, digest)
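# Hedged illustration (added for this write-up): 'set' builds a brand-new CID via
# CID._new_instance and never mutates the receiver; note also that equality (via
# _as_tuple) compares version, codec and digest, but not the default multibase.
# A minimal sketch, assuming the multiformats package is installed.
def _example_set_returns_new_instance() -> None:
    from multiformats import CID
    cid = CID.decode("zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA")
    rebased = cid.set(base="base32")
    assert rebased is not cid                   # a new instance is returned
    assert rebased == cid                       # equal: multibase is not part of _as_tuple
    assert cid.base.name == "base58btc"         # the original is untouched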
| 379 | 449 |
"""
Implementation of the `CID spec <https://github.com/multiformats/cid>`_.
This module differs from other modules of :mod:`~multiformats`, in that the functionality is completely
encapsulated by a single class :class:`CID`, which is imported from top level instead
of the module itself:
>>> from multiformats import CID
"""
from typing import Any, cast, FrozenSet, Tuple, Type, TypeVar, Union
from typing_extensions import Literal, Final
from typing_validation import validate
from bases import base58btc
from multiformats import varint, multicodec, multibase, multihash
from multiformats.multicodec import Multicodec
from multiformats.multibase import Multibase
from multiformats.multihash import Multihash, _validate_raw_digest_size
from multiformats.varint import BytesLike, byteslike
_CIDSubclass = TypeVar("_CIDSubclass", bound="CID")
CIDVersion = Literal[0, 1]
CIDVersionNumbers: Final[FrozenSet[int]] = frozenset({0, 1})
def _binary_cid_from_str(cid: str) -> Tuple[bytes, Multibase]:
if len(cid) == 46 and cid.startswith("Qm"):
# CIDv0 to be decoded as base58btc
return base58btc.decode(cid), multibase.get("base58btc")
mb, b = multibase.decode_raw(cid)
if b[0] == 0x12:
# CIDv0 may not be multibase encoded (0x12 is the first byte of sha2-256 multihashes)
# CIDv18 (first byte 18=0x12) will be skipped to prevent ambiguity
raise ValueError("CIDv0 may not be multibase encoded (found multibase encoded bytes starting with 0x12).")
return b, mb
def _CID_validate_multibase(base: Union[str, Multibase]) -> Multibase:
if isinstance(base, str):
base = multibase.get(base)
else:
multibase.validate_multibase(base)
return base
def _CID_validate_multicodec(codec: Union[str, int, Multicodec]) -> Multicodec:
if isinstance(codec, str):
codec = multicodec.get(codec)
elif isinstance(codec, int):
codec = multicodec.get(code=codec)
else:
multicodec.validate_multicodec(codec)
return codec
def _CID_validate_multihash(hashfun: Union[str, int, Multihash]) -> Multihash:
if isinstance(hashfun, str):
hashfun = multihash.get(hashfun)
elif isinstance(hashfun, int):
hashfun = multihash.get(code=hashfun)
else:
pass
return hashfun
def _CID_validate_raw_digest(raw_digest: Union[str, BytesLike], hashfun: Multihash) -> bytes:
if isinstance(raw_digest, str):
raw_digest = bytes.fromhex(raw_digest)
else:
validate(raw_digest, BytesLike)
if not isinstance(raw_digest, bytes):
raw_digest = bytes(raw_digest)
_, max_digest_size = hashfun.implementation
_validate_raw_digest_size(hashfun.name, raw_digest, max_digest_size)
return raw_digest
def _CID_validate_multihash_digest(digest: Union[str, BytesLike]) -> Tuple[Multihash, bytes]:
if isinstance(digest, str):
digest = bytes.fromhex(digest)
raw_digest: BytesLike
code, raw_digest = multihash.unwrap_raw(digest)
hashfun = _CID_validate_multihash(code)
raw_digest = _CID_validate_raw_digest(raw_digest, hashfun)
return hashfun, raw_digest
def _CID_validate_version(version: int, base: Multibase, codec: Multicodec, hashfun: Multihash) -> int:
if version in (2, 3):
raise ValueError("CID versions 2 and 3 are reserved for future use.")
if version not in (0, 1):
raise ValueError(f"CID version {version} is not allowed.")
if version == 0:
if base.name != 'base58btc':
raise ValueError(f"CIDv0 multibase must be 'base58btc', found {repr(base.name)} instead.")
if codec.name != "dag-pb":
raise ValueError(f"CIDv0 multicodec must be 'dag-pb', found {repr(codec.name)} instead.")
if hashfun.name != "sha2-256":
raise ValueError(f"CIDv0 multihash must be 'sha2-256', found {repr(hashfun.name)} instead.")
return version
class CID:
"""
Container class for `Content IDentifiers <https://github.com/multiformats/cid>`_.
CIDs can be explicitly instantiated by passing multibase, CID version, multicodec and multihash digest to the constructor:
>>> cid = CID("base58btc", 1, "raw",
... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> str(cid)
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
Alternatively, a pair of multihash codec and raw hash digest can be passed in lieu of the multihash digest:
>>> raw_digest = bytes.fromhex(
... "6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> cid = CID("base58btc", 1, "raw", ("sha2-256", raw_digest))
>>> str(cid)
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
The multihash digest and raw digest values can be passed either as :obj:`bytes`-like objects or as the corresponding hex strings:
>>> isinstance(raw_digest, bytes)
True
>>> raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
Note: the hex strings are not multibase encoded.
Calling :obj:`bytes` on an instance of this class returns its binary representation, as a :obj:`bytes` object:
>>> cid = CID("base58btc", 1, "raw",
... "12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95")
>>> raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
>>> bytes(cid).hex()
'015512206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
#^^ 0x01 = CIDv1
# ^^ 0x55 = 'raw' codec
>>> bytes(cid)
:param base: default multibase to use when encoding this CID
:type base: :obj:`str` or :class:`~multiformats.multibase.Multibase`
:param version: the CID version
:type version: 0 or 1
:param codec: the content multicodec
:type codec: :obj:`str`, :obj:`int` or :class:`~multiformats.multicodec.Multicodec`
:param digest: the content multihash digest, or a pair of multihash codec and raw content digest
:type digest: see below
The ``digest`` parameter can be specified in the following ways:
- as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex`
- as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly
- as a pair ``(multihash_codec, raw_digest)`` of a multihash and raw hash digest, which are used to produce a multihash digest
via the :meth:`~multiformats.multihash.Multihash.wrap` method
If ``digest`` is specified by a pair, the ``multihash_codec`` value can be specified in the following ways:
- by multihash multicodec name, as a :obj:`str`
- by multihash multicodec code, as an :obj:`int`
- as a :class:`~multiformats.multihash.Multihash` object
If ``digest`` is specified by a pair, the ``raw_digest`` value can be specified in the following ways:
- as a :obj:`str`, in which case it is treated as a hex-string and converted to :obj:`bytes` using :obj:`bytes.fromhex`
- as a :obj:`~multiformats.varint.BytesLike`, in which case it is converted to :obj:`bytes` directly
:raises ValueError: if the CID version is unsupported
:raises ValueError: if version is 0 but base is not 'base58btc' or codec is not 'dag-pb'
:raises KeyError: if the multibase, multicodec or multihash are unknown
"""
_base: Multibase
_version: CIDVersion
_codec: Multicodec
_hashfun: Multihash
_digest: bytes
__slots__ = ("__weakref__", "_base", "_version", "_codec", "_hashfun", "_digest")
def __new__(cls: Type[_CIDSubclass],
base: Union[str, Multibase],
version: int,
codec: Union[str, int, Multicodec],
digest: Union[str, BytesLike, Tuple[Union[str, int, Multihash], Union[str, BytesLike]]],
) -> _CIDSubclass:
# pylint: disable = too-many-arguments
base = _CID_validate_multibase(base)
codec = _CID_validate_multicodec(codec)
raw_digest: Union[str, bytes]
hashfun: Union[str, int, Multihash]
if isinstance(digest, (str,)+byteslike):
hashfun, raw_digest = _CID_validate_multihash_digest(digest)
else:
validate(digest, Tuple[Union[str, int, Multihash], Union[str, BytesLike]])
hashfun, raw_digest = digest
hashfun = _CID_validate_multihash(hashfun)
raw_digest = _CID_validate_raw_digest(raw_digest, hashfun)
version = _CID_validate_version(version, base, codec, hashfun)
if isinstance(digest, bytes):
return CID._new_instance(cls, base, version, codec, hashfun, digest)
return CID._new_instance(cls, base, version, codec, hashfun, (hashfun, raw_digest))
@staticmethod
def _new_instance(CID_subclass: Type[_CIDSubclass],
base: Multibase,
version: int,
codec: Multicodec,
hashfun: Multihash,
digest: Union[bytes, Tuple[Multihash, bytes]],
) -> _CIDSubclass:
# pylint: disable = too-many-arguments
instance: _CIDSubclass = super().__new__(CID_subclass)
instance._base = base
assert version in (0, 1)
instance._version = cast(Literal[0, 1], version)
instance._codec = codec
instance._hashfun = hashfun
if isinstance(digest, bytes):
instance._digest = digest
elif isinstance(digest, byteslike):
instance._digest = bytes(digest)
else:
_hashfun, raw_digest = digest
if not isinstance(raw_digest, bytes):
raw_digest = bytes(raw_digest)
assert _hashfun == hashfun, "You passed different multihashes to a _new_instance call with digest as a pair."
instance._digest = hashfun.wrap(raw_digest)
return instance
@property
def version(self) -> CIDVersion:
"""
CID version.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.version
1
"""
return self._version
@property
def base(self) -> Multibase:
"""
Multibase used to encode the CID:
- if a CIDv1 was decoded from a multibase-encoded string, the encoding multibase is used
- if a CIDv1 was decoded from a bytestring, the 'base58btc' multibase is used
- for a CIDv0, 'base58btc' is always used
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.base
Multibase(name='base58btc', code='z',
status='default', description='base58 bitcoin')
"""
return self._base
@property
def codec(self) -> Multicodec:
"""
Codec that the multihash digest refers to.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.codec
Multicodec(name='raw', tag='ipld', code='0x55',
status='permanent', description='raw binary')
"""
return self._codec
@property
def hashfun(self) -> Multihash:
"""
Multihash used to produce the multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.hashfun
Multicodec(name='sha2-256', tag='multihash', code='0x12',
status='permanent', description='')
"""
return self._hashfun
@property
def digest(self) -> bytes:
"""
Multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.digest.hex()
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
"""
return self._digest
@property
def raw_digest(self) -> bytes:
"""
Raw hash digest, decoded from the multihash digest.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.raw_digest.hex()
'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
"""
return multihash.unwrap(self._digest)
@property
def human_readable(self) -> str:
"""
Human-readable representation of the CID.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.human_readable
'base58btc - cidv1 - raw - (sha2-256 : 256 : 6E6FF7950A36187A801613426E858DCE686CD7D7E3C0FC42EE0330072D245C95)'
"""
raw_digest = self.raw_digest
hashfun_str = f"({self.hashfun.name} : {len(raw_digest)*8} : {raw_digest.hex().upper()})"
return f"{self.base.name} - cidv{self.version} - {self.codec.name} - {hashfun_str}"
def encode(self, base: Union[None, str, Multibase] = None) -> str:
"""
Encodes the CID using a given multibase. If :obj:`None` is given,
the CID's own multibase is used by default.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid.encode() # default: cid.base
'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA'
>>> cid.encode("base32")
'bafkreidon73zkcrwdb5iafqtijxildoonbwnpv7dyd6ef3qdgads2jc4su'
:param base: the multibase to be used for encoding
:type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*
:raises KeyError: see :meth:`multiformats.multibase.Multibase.encode`
"""
if self.version == 0:
if base is not None:
raise ValueError("CIDv0 cannot be multibase-encoded, please set multibase=None.")
return base58btc.encode(bytes(self))
if base is None or base == self.base:
base = self.base # use CID's own multibase as default
else:
if isinstance(base, str):
base = multibase.get(base)
else:
multibase.validate_multibase(base)
return base.encode(bytes(self))
def set(self, *,
base: Union[None, str, Multibase] = None,
version: Union[None, int] = None,
codec: Union[None, str, int, Multicodec] = None
) -> "CID":
"""
Returns a new CID obtained by setting new values for one or more of:
``base``, ``version``, or ``codec``.
Example usage:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(base="base32")
CID('base32', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(codec="dag-cbor")
CID('base58btc', 1, 'dag-cbor',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0, codec="dag-pb")
CID('base58btc', 0, 'dag-pb',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
# Note: 'CID.set' returns new instances,
# the original 'cid' instance is unchanged
If setting ``version`` to 0, ``base`` must be 'base58btc' and ``codec`` must be 'dag-pb'.
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> cid = CID.decode(s)
>>> cid
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0, codec="dag-pb")
CID('base58btc', 0, 'dag-pb',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
>>> cid.set(version=0)
ValueError: CIDv0 multicodec must be 'dag-pb', found 'raw' instead.
>>> cid.set(version=0, codec="dag-pb", base="base32")
ValueError: CIDv0 multibase must be 'base58btc', found 'base32' instead
:param base: the new CID multibase, or :obj:`None` if multibase unchanged
:type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*
:param version: the new CID version, or :obj:`None` if version unchanged
:type version: :obj:`None`, 0 or 1, *optional*
:param codec: the new content multicodec, or :obj:`None` if multicodec unchanged
:type codec: :obj:`None`, :obj:`str` or :class:`~multiformats.multicodec.Multicodec`, *optional*
:raises KeyError: if the multibase or multicodec are unknown
"""
hashfun = self.hashfun
digest = self.digest
if base is not None and base not in (self.base, self.base.name):
base = _CID_validate_multibase(base)
else:
base = self.base
if codec is not None and codec not in (self.codec, self.codec.name, self.codec.code):
codec = _CID_validate_multicodec(codec)
else:
codec = self.codec
if version is not None and version != self.version:
_CID_validate_version(version, base, codec, hashfun)
else:
version = self.version
return CID._new_instance(CID, base, version, codec, hashfun, digest)
def __bytes__(self) -> bytes:
if self.version == 0:
return self.digest
return varint.encode(self.version)+varint.encode(self.codec.code)+self.digest
def __str__(self) -> str:
return self.encode()
def __repr__(self) -> str:
mb = self.base.name
v = self.version
mc = self.codec.name
d = self.digest
return f"CID({repr(mb)}, {v}, {repr(mc)}, {repr(d.hex())})"
@property
def _as_tuple(self) -> Tuple[Type["CID"], int, Multicodec, bytes]:
return (CID, self.version, self.codec, self.digest)
def __hash__(self) -> int:
return hash(self._as_tuple)
def __eq__(self, other: Any) -> bool:
if self is other:
return True
if not isinstance(other, CID):
return NotImplemented
return self._as_tuple == other._as_tuple
@staticmethod
def decode(cid: Union[str, BytesLike]) -> "CID":
"""
Decodes a CID from a bytestring or a hex string (which will be converted to :obj:`bytes`
using :obj:`bytes.fromhex`). Note: the hex string is not multibase encoded.
Example usage for CIDv1 multibase-encoded string:
>>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
>>> CID.decode(s)
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
Example usage for CIDv1 bytestring (multibase always set to 'base58btc'):
>>> b = bytes.fromhex(
... "015512206e6ff7950a36187a801613426e85"
... "8dce686cd7d7e3c0fc42ee0330072d245c95")
>>> CID.decode(b)
CID('base58btc', 1, 'raw',
'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95')
Example usage for CIDv0 base58-encoded string:
>>> s = "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR"
>>> CID.decode(s)
CID('base58btc', 0, 'dag-pb',
'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a')
Example usage for CIDv0 bytestring (multibase always set to 'base58btc'):
>>> b = bytes.fromhex(
... "1220c3c4733ec8affd06cf9e9ff50ffc6b"
... "cd2ec85a6170004bb709669c31de94391a")
>>> CID.decode(b)
CID('base58btc', 0, 'dag-pb',
'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a')
:param cid: the CID bytes or multibase-encoded string
:type cid: :obj:`str` or :obj:`~multiformats.varint.BytesLike`
:raises ValueError: if the CID is malformed or the CID version is unsupported
:raises KeyError: if the multibase, multicodec or multihash are unknown
"""
if isinstance(cid, str):
cid, mb = _binary_cid_from_str(cid)
else:
mb = multibase.get("base58btc")
validate(cid, BytesLike)
cid = memoryview(cid)
# if len(cid) == 34 and cid.startswith(b"\x12\x20"):
if len(cid) == 34 and cid[0] == 0x12 and cid[1] == 0x20:
v = 0 # CID version
mc_code = 0x70 # multicodec.get("dag-pb")
digest = cid # multihash digest is what's left
else:
v, _, cid = varint.decode_raw(cid) # CID version
if v == 0:
raise ValueError("CIDv0 is malformed.")
if v in (2, 3):
raise ValueError("CID versions 2 and 3 are reserved for future use.")
if v != 1:
raise ValueError(f"CIDv{v} is currently not supported.")
mc_code, _, cid = multicodec.unwrap_raw(cid) # multicodec
digest = cid # multihash digest is what's left
mc = multicodec.get(code=mc_code)
mh_code, _ = multihash.unwrap_raw(digest)
mh = multihash.get(code=mh_code)
return CID._new_instance(CID, mb, v, mc, mh, digest)
@staticmethod
def peer_id(pk_bytes: Union[str, BytesLike]) -> "CID":
"""
Wraps the raw hash of a public key into a `PeerID <https://docs.libp2p.io/concepts/peer-id/>`_, as a CIDv1.
The ``pk_bytes`` argument should be the binary public key, encoded according to the
`PeerID spec <https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md>`_.
This can be passed as a bytestring or as a hex string (which will be converted to :obj:`bytes` using :obj:`bytes.fromhex`).
Note: the hex string is not multibase encoded.
Example usage with Ed25519 public key:
>>> pk_bytes = bytes.fromhex(
... "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93")
... # a 32-byte Ed25519 public key
>>> peer_id = CID.peer_id(pk_bytes)
>>> peer_id
CID('base32', 1, 'libp2p-key',
'00201498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93')
#^^ 0x00 = 'identity' multihash used (public key length <= 42)
# ^^ 0x20 = 32-bytes of raw hash digest length
>>> str(peer_id)
'bafzaaiautc2um6td375c3soz4bu4v4dv2fx4gp65jq5qdp5nvzsdg5t5sm'
Snippet showing how to obtain the `Ed25519 <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ed25519/>`_
public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:
>>> from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
>>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
>>> private_key = Ed25519PrivateKey.generate()
>>> public_key = private_key.public_key()
>>> pk_bytes = public_key.public_bytes(
... encoding=Encoding.Raw,
... format=PublicFormat.Raw
... )
>>> pk_bytes.hex()
"1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93"
Example usage with DER-encoded RSA public key:
>>> pk_bytes = bytes.fromhex(
... "30820122300d06092a864886f70d01010105000382010f003082010a02820101"
... "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"
... "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"
... "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"
... "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"
... "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"
... "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"
... "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"
... "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"
... "370203010001")
... # a 294-byte RSA public key
>>> peer_id = CID.peer_id(pk_bytes)
>>> peer_id
CID('base32', 1, 'libp2p-key',
'1220c1a6513ffb14f202f75453c49666a5b9d7ed9a1a068891daf824d477573f829f')
#^^ 0x12 = 'sha2-256' multihash used (public key length > 42)
# ^^ 0x20 = 32-bytes of raw hash digest length
>>> str(peer_id)
'bafzbeigbuzit76yu6ibpovctyslgnjnz27wzugqgrci5v6be2r3vop4ct4'
Snippet showing how to obtain the `RSA <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/>`_
public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:
>>> from cryptography.hazmat.primitives.asymmetric import rsa
>>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
>>> private_key = rsa.generate_private_key(
... public_exponent=65537,
... key_size=2048,
... )
>>> public_key = private_key.public_key()
>>> pk_bytes = public_key.public_bytes(
... encoding=Encoding.DER,
... format=PublicFormat.SubjectPublicKeyInfo
... )
>>> pk_bytes.hex()
"30820122300d06092a864886f70d01010105000382010f003082010a02820101"
"009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"
"5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"
"b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"
"591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"
"26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"
"2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"
"87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"
"6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"
"370203010001"
:param pk_bytes: the public key bytes
:type pk_bytes: :obj:`str` or :obj:`~multiformats.varint.BytesLike`
:raises ValueError: if ``pk_bytes`` is passed as a string and is not the hex-string of some bytes
"""
if isinstance(pk_bytes, str):
pk_bytes = bytes.fromhex(pk_bytes)
else:
validate(pk_bytes, BytesLike)
if len(pk_bytes) <= 42:
mh = multihash.get("identity")
digest = multihash.digest(pk_bytes, mh)
else:
mh = multihash.get("sha2-256")
digest = multihash.digest(pk_bytes, mh)
mc = multicodec.get(code=0x72) # multicodec.get("libp2p-key")
mb = multibase.get("base32")
return CID._new_instance(CID, mb, 1, mc, mh, digest)
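# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# A minimal round-trip check of the CID API documented above; it only uses
# methods shown in this file (CID.decode, CID.set, CID.encode, bytes()).
if __name__ == "__main__":
    _s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"
    _cid = CID.decode(_s)
    # encode() with no argument uses the CID's own multibase, so it round-trips
    assert _cid.encode() == _s
    # set() returns a new instance; the original CID is unchanged
    _cid32 = _cid.set(base="base32")
    assert _cid32.base.name == "base32" and _cid.base.name == "base58btc"
    # the binary form of a CIDv1 decodes back to an equal CID
    assert CID.decode(bytes(_cid)) == _cid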
|
onerror
|
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
|
import os
import shutil
from thlib.side.Qt import QtWidgets as QtGui
from thlib.side.Qt import QtGui as Qt4Gui
from thlib.side.Qt import QtCore
from thlib.environment import env_inst, env_tactic, cfg_controls, env_read_config, env_write_config, dl
import thlib.global_functions as gf
import thlib.tactic_classes as tc
from thlib.ui.misc.ui_watch_folders import Ui_ProjectWatchFolder
class Ui_projectWatchFoldersWidget(QtGui.QDialog, Ui_ProjectWatchFolder):
def __init__(self, project, parent=None):
super(self.__class__, self).__init__(parent=parent)
self.project = project
self.watch_folders_dict = self.get_watch_folders_dict()
self.watched_items = set()
env_inst.watch_folders[self.project.get_code()] = self
self.setupUi(self)
self.create_ui()
def create_ui(self):
self.watchFoldersTreeWidget.setStyleSheet('QTreeView::item {padding: 2px;}')
self.setSizeGripEnabled(True)
self.setWindowTitle('Watched Assets for Project: {0}'.format(self.project.info.get('title')))
self.create_fs_watcher()
self.create_watch_folders_tree_context_menu()
self.controls_actions()
self.readSettings()
self.watchEnabledCheckBox.setEnabled(False)
def create_fs_watcher(self):
self.fs_watcher = gf.FSObserver()
self.fs_watcher.set_created_signal(self.handle_watch_created_event)
def create_watch_folders_tree_context_menu(self):
self.watchFoldersTreeWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.watchFoldersTreeWidget.customContextMenuRequested.connect(self.open_menu)
def watch_items_menu(self):
# TODO Make this work
enable_watch = QtGui.QAction('Enable Watch', self.watchFoldersTreeWidget)
enable_watch.setIcon(gf.get_icon('eye'))
# enable_watch.triggered.connect(self.open_file_from_tree)
disable_watch = QtGui.QAction('Disable Watch', self.watchFoldersTreeWidget)
disable_watch.setIcon(gf.get_icon('eye-slash'))
# disable_watch.triggered.connect(self.open_file_from_tree)
edit_watch = QtGui.QAction('Edit Watch', self.watchFoldersTreeWidget)
edit_watch.setIcon(gf.get_icon('edit'))
# edit_watch.triggered.connect(self.open_file_from_tree)
delete_watch = QtGui.QAction('Delete Watch', self.watchFoldersTreeWidget)
delete_watch.setIcon(gf.get_icon('remove'))
# edit_watch.triggered.connect(self.open_file_from_tree)
menu = QtGui.QMenu()
menu.addAction(enable_watch)
menu.addAction(disable_watch)
menu.addAction(edit_watch)
menu.addAction(delete_watch)
return menu
def open_menu(self):
item = self.watchFoldersTreeWidget.currentItem()
if item:
if item.data(0, QtCore.Qt.UserRole):
menu = self.watch_items_menu()
if menu:
menu.exec_(Qt4Gui.QCursor.pos())
def add_item_to_fs_watch(self, skey, path=None, recursive=True):
watch_dict = self.get_watch_dict_by_skey(skey)
if not path:
path = watch_dict['path']
paths = []
for repo in watch_dict['rep']:
abs_path = env_tactic.get_base_dir(repo)['value'][0] + '/' + path
paths.append(gf.form_path(abs_path))
self.fs_watcher.append_watch(watch_name=skey, paths=paths, repos=watch_dict['rep'], pipeline=watch_dict['asset_pipeline'], recursive=recursive)
def remove_item_from_fs_watch(self, skey):
self.fs_watcher.remove_watch(watch_name=skey)
def handle_watch_created_event(self, event, watch):
dl.log(u'File dropped to watch folder {}'.format(event.src_path), group_id='watch_folder')
self.show()
self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
QtGui.QDialog.activateWindow(self)
self.show()
self.hide()
search_key = watch.watch_name
pipeline = watch.pipeline
commit_path = gf.extract_dirname(event.src_path)
if watch.path == commit_path:
context = 'publish'
else:
context = gf.form_path(commit_path, 'linux').split('/')[-1]
description = 'From watch folder'
skey_dict = tc.split_search_key(search_key)
checkin_widget = env_inst.get_check_tree(
project_code=skey_dict['project_code'],
tab_code='checkin_out',
wdg_code=skey_dict['pipeline_code'],
)
checkin_widget.do_creating_ui()
match_template = gf.MatchTemplate(['$FILENAME.$EXT'])
files_objects_dict = match_template.get_files_objects([event.src_path])
stypes = self.project.get_stypes()
current_stype = stypes.get(skey_dict['pipeline_code'])
pipelines = current_stype.get_pipeline()
checkin_mode = None
if pipelines:
# here we do pipelines routine
current_pipeline = pipelines.get(pipeline)
if not current_pipeline:
# looks like we don't have a pipeline with the Search Type name, so we take the first one
# Also this is not quite right, because we should know the exact pipeline and its processes, so a proper pipeline_code should be written when creating the watch folder
current_pipeline = list(pipelines.values())[0]
current_process = current_pipeline.get_pipeline_process(context)
if current_process:
checkin_mode = current_process.get('checkin_mode')
else:
context = 'publish'
checkin_widget.checkin_file_objects(
search_key=search_key,
context=context,
description=description,
files_objects=files_objects_dict.get('file'),
checkin_type=checkin_mode,
keep_file_name=False
)
else:
# here we go with publish, without pipeline
checkin_widget.checkin_file_objects(
search_key=search_key,
context='publish',
description=description,
files_objects=files_objects_dict.get('file'),
checkin_type=checkin_mode,
keep_file_name=False
)
def controls_actions(self):
pass
def fill_watch_folders_tree_widget(self):
self.watchFoldersTreeWidget.clear()
if self.watch_folders_dict:
for i, asset_skey in enumerate(self.watch_folders_dict.get('assets_skeys')):
root_item = QtGui.QTreeWidgetItem()
root_item.setData(0, QtCore.Qt.UserRole, asset_skey)
root_item.setText(1, self.watch_folders_dict['assets_stypes'][i])
root_item.setText(2, self.watch_folders_dict['assets_names'][i])
repos_names = []
for repo in self.watch_folders_dict['repos'][i]:
repos_names.append(env_tactic.get_base_dir(repo)['value'][1])
root_item.setText(3, ', '.join(repos_names))
# setting actual watch status
if self.watch_folders_dict['statuses'][i]:
if self.check_for_item_in_watch(asset_skey):
root_item.setText(0, 'Watching')
self.start_watch_by_skey(asset_skey)
else:
root_item.setText(0, 'Waiting')
else:
root_item.setText(0, 'Stopped')
self.stop_watch_by_skey(asset_skey)
self.watchFoldersTreeWidget.addTopLevelItem(root_item)
self.watchFoldersTreeWidget.resizeColumnToContents(0)
self.watchFoldersTreeWidget.resizeColumnToContents(1)
self.watchFoldersTreeWidget.resizeColumnToContents(2)
self.watchFoldersTreeWidget.resizeColumnToContents(3)
if self.watched_items:
self.start_watching()
else:
self.stop_watching()
def start_watching(self):
if not self.fs_watcher.is_started():
self.fs_watcher.start()
def stop_watching(self):
if self.fs_watcher.is_started():
self.fs_watcher.stop()
def stop_watch_by_skey(self, skey):
for item in self.watched_items:
if item.get_search_key() == skey:
self.remove_item_from_fs_watch(skey)
item.watchFolderToolButton.setChecked(False)
def start_watch_by_skey(self, skey):
for item in self.watched_items:
if item.get_search_key() == skey:
self.add_item_to_fs_watch(skey, item.get_watch_folder_path(), True)
item.watchFolderToolButton.setChecked(True)
def check_for_item_in_watch(self, skey):
for item in self.watched_items:
if item.get_search_key() == skey:
if item.is_have_watch_folder():
return True
def add_item_to_watch(self, sobject_item):
# checking if watch folder exists
watch_dict = self.get_watch_dict_by_skey(sobject_item.get_search_key())
all_folders_exists = True
base_dirs = env_tactic.get_all_base_dirs()
for key, val in base_dirs:
if val['value'][4] and val['value'][3] in watch_dict['rep']:
abs_path = u'{0}/{1}'.format(val['value'][0], watch_dict['path'])
if not os.path.exists(gf.form_path(abs_path)):
all_folders_exists = False
dl.warning('Folders structure for: {0} is not created. '
'Watch will be ignored.'.format(abs_path),
group_id='watch_folders_ui')
if all_folders_exists:
self.watched_items.add(sobject_item)
self.fill_watch_folders_tree_widget()
def remove_item_from_watch(self, sobject_item):
self.watched_items.discard(sobject_item)
def add_asset_to_watch(self, sobject_item):
# in case of some bugs double checking
if not self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key()):
self.create_repo_editor_ui(sobject_item)
else:
sobject_item.check_watch_folder()
def edit_aseet_watch(self, sobject_item):
watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())
if watch_dict:
self.create_repo_editor_ui(sobject_item, mode='edit')
else:
sobject_item.check_watch_folder(True)
def delete_aseet_from_watch(self, sobject_item):
watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())
if watch_dict:
self.delete_watch_from_watch_folders_dict(sobject_item)
else:
sobject_item.check_watch_folder(True)
@gf.catch_error
def create_watch_folders(self, repos_list, sobject_item):
# creating base folders with paths
for repo in repos_list:
abs_path = env_tactic.get_base_dir(repo)['value'][0] + '/' + sobject_item.get_watch_folder_path()
# creating folder for publish
if not os.path.exists(gf.form_path(abs_path)):
os.makedirs(gf.form_path(abs_path))
# creating folders by processes
if sobject_item.get_process_list(include_hierarchy=True):
for process in sobject_item.get_process_list(include_hierarchy=True):
process_abs_path = abs_path + '/' + process
if not os.path.exists(gf.form_path(process_abs_path)):
os.makedirs(gf.form_path(process_abs_path))
@gf.catch_error
def delete_watch_folders_and_files(self, repos_list, sobject_item):
# MASKED: onerror function (lines 317-332)
# else:
# raise
for repo in repos_list:
abs_path = env_tactic.get_base_dir(repo)['value'][0] + '/' + sobject_item.get_watch_folder_path()
if os.path.exists(gf.form_path(abs_path)):
shutil.rmtree(gf.form_path(abs_path), onerror=onerror)
def add_watch_to_watch_folders_dict(self, repos_list, sobject_item):
self.watch_folders_dict['assets_names'].append(sobject_item.get_title())
self.watch_folders_dict['assets_codes'].append(sobject_item.sobject.info.get('code'))
self.watch_folders_dict['assets_stypes'].append(sobject_item.stype.get_pretty_name())
self.watch_folders_dict['assets_skeys'].append(sobject_item.sobject.get_search_key())
self.watch_folders_dict['assets_pipelines'].append(sobject_item.sobject.get_pipeline_code())
self.watch_folders_dict['paths'].append(sobject_item.get_watch_folder_path())
self.watch_folders_dict['repos'].append(repos_list)
self.watch_folders_dict['statuses'].append(True)
self.create_watch_folders(repos_list, sobject_item)
sobject_item.check_watch_folder()
self.writeSettings()
def save_watch_to_watch_folders_dict(self, repos_list, sobject_item):
watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())
if watch_dict:
idx = watch_dict['idx']
self.watch_folders_dict['assets_names'][idx] = sobject_item.get_title()
self.watch_folders_dict['assets_codes'][idx] = sobject_item.sobject.info.get('code')
self.watch_folders_dict['assets_stypes'][idx] = sobject_item.stype.get_pretty_name()
self.watch_folders_dict['assets_skeys'][idx] = sobject_item.sobject.get_search_key()
self.watch_folders_dict['assets_pipelines'][idx] = sobject_item.sobject.get_pipeline_code()
self.watch_folders_dict['paths'][idx] = sobject_item.get_watch_folder_path()
self.watch_folders_dict['repos'][idx] = repos_list
self.create_watch_folders(repos_list, sobject_item)
sobject_item.check_watch_folder()
self.writeSettings()
def edit_watch_to_watch_folders_dict(self, sobject_item, asset_name=None, asset_code=None,asset_stype=None,
asset_skey=None, asset_pipeline=None, path=None, repo=None, status=False):
watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())
if watch_dict:
idx = watch_dict['idx']
if asset_name:
self.watch_folders_dict['assets_names'][idx] = sobject_item.get_title()
if asset_code:
self.watch_folders_dict['assets_codes'][idx] = sobject_item.sobject.info.get('code')
if asset_stype:
self.watch_folders_dict['assets_stypes'][idx] = sobject_item.stype.get_pretty_name()
if asset_skey:
self.watch_folders_dict['assets_skeys'][idx] = sobject_item.sobject.get_search_key()
if asset_pipeline:
self.watch_folders_dict['assets_pipelines'][idx] = sobject_item.sobject.get_pipeline_code()
if path:
self.watch_folders_dict['paths'][idx] = path
if repo:
self.watch_folders_dict['repos'][idx] = repo
self.watch_folders_dict['statuses'][idx] = status
sobject_item.check_watch_folder()
self.fill_watch_folders_tree_widget()
self.writeSettings()
def delete_watch_from_watch_folders_dict(self, sobject_item):
buttons = (('Remove', QtGui.QMessageBox.YesRole), ('Keep', QtGui.QMessageBox.ActionRole), ('Cancel', QtGui.QMessageBox.NoRole))
reply = gf.show_message_predefined(
'Remove Watch Folder dirs from repos?',
'Watch Folder Directories and Files can also be removed from Your Repositories.'
'<br>Remove or Keep these Dirs and Files?',
buttons=buttons,
message_type='question'
)
delete_files = False
delete_watch_folder = False
if reply == QtGui.QMessageBox.YesRole:
delete_files = True
delete_watch_folder = True
elif reply == QtGui.QMessageBox.ActionRole:
delete_files = False
delete_watch_folder = True
if delete_watch_folder:
self.stop_watch_by_skey(sobject_item.sobject.get_search_key())
idx = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())['idx']
self.watch_folders_dict['assets_names'].pop(idx)
self.watch_folders_dict['assets_codes'].pop(idx)
self.watch_folders_dict['assets_stypes'].pop(idx)
self.watch_folders_dict['assets_skeys'].pop(idx)
self.watch_folders_dict['assets_pipelines'].pop(idx)
self.watch_folders_dict['paths'].pop(idx)
repos = self.watch_folders_dict['repos'].pop(idx)
self.watch_folders_dict['statuses'].pop(idx)
sobject_item.check_watch_folder(True)
self.writeSettings()
if delete_files:
self.delete_watch_folders_and_files(repos, sobject_item)
def create_repo_editor_ui(self, sobject_item, mode='create'):
add_watch_ui = Ui_repositoryEditorWidget(sobject_item=sobject_item, mode=mode, parent=env_inst.ui_main)
add_watch_ui.saved_signal.connect(self.add_watch_to_watch_folders_dict)
add_watch_ui.edited_signal.connect(self.save_watch_to_watch_folders_dict)
add_watch_ui.exec_()
def set_watch_folders_from_dict(self, watch_folders_dict=None):
if watch_folders_dict:
print('FILLING WATCH FOLDER')
def get_watch_dict_by_skey(self, skey):
if self.watch_folders_dict:
for i, asset_skey in enumerate(self.watch_folders_dict.get('assets_skeys')):
if skey == asset_skey:
return {
'asset_code': self.watch_folders_dict['assets_codes'][i],
'asset_name': self.watch_folders_dict['assets_names'][i],
'asset_stype': self.watch_folders_dict['assets_stypes'][i],
'asset_skey': self.watch_folders_dict['assets_skeys'][i],
'asset_pipeline': self.watch_folders_dict['assets_pipelines'][i],
'path': self.watch_folders_dict['paths'][i],
'rep': self.watch_folders_dict['repos'][i],
'status': self.watch_folders_dict['statuses'][i],
'idx': i,
}
@staticmethod
def get_watch_folders_dict():
return {
'assets_codes': [],
'assets_names': [],
'assets_stypes': [],
'assets_skeys': [],
'assets_pipelines': [],
'paths': [],
'repos': [],
'statuses': [],
}
def set_settings_from_dict(self, settings_dict=None):
ref_settings_dict = {
'watch_folders_dict': self.watch_folders_dict,
}
settings = gf.check_config(ref_settings_dict, settings_dict)
self.watch_folders_dict = settings['watch_folders_dict']
def get_settings_dict(self):
settings_dict = {
'watch_folders_dict': self.watch_folders_dict,
}
return settings_dict
def readSettings(self):
self.set_settings_from_dict(env_read_config(
filename='ui_watch_folder',
unique_id='ui_main/{0}/{1}'.format(self.project.get_type(), self.project.get_code()),
long_abs_path=True))
def writeSettings(self):
env_write_config(
self.get_settings_dict(),
filename='ui_watch_folder',
unique_id='ui_main/{0}/{1}'.format(self.project.get_type(), self.project.get_code()),
long_abs_path=True)
def showEvent(self, event):
event.accept()
self.fill_watch_folders_tree_widget()
def closeEvent(self, event):
self.writeSettings()
event.accept()
class Ui_repositoryEditorWidget(QtGui.QDialog):
saved_signal = QtCore.Signal(object, object)
edited_signal = QtCore.Signal(object, object)
def __init__(self, sobject_item, mode='create', parent=None):
super(self.__class__, self).__init__(parent=parent)
self.sobject_item = sobject_item
self.mode = mode
self.saved = False
self.exclude_repo_list = self.get_exclude_repo_list()
self.create_ui()
def create_ui(self):
if self.mode == 'create':
self.setWindowTitle('Choose Repositories to Watch')
else:
self.setWindowTitle('Editing Watch Folders')
self.resize(600, 420)
self.setSizeGripEnabled(True)
self.creat_layout()
self.create_repo_path_line_edit()
self.create_repo_combo_box()
self.create_repos_tree_widget()
self.create_buttons()
if self.mode == 'edit':
self.fill_repo_combo_box(self.exclude_repo_list)
self.fill_repo_tree_widget(self.exclude_repo_list)
else:
self.fill_repo_combo_box()
self.fill_repo_tree_widget()
self.check_save_ability()
self.controls_actions()
def controls_actions(self):
self.add_new_button.clicked.connect(self.add_new_repo)
self.remove_button.clicked.connect(self.delete_selected_repo)
self.save_button.clicked.connect(self.save_and_close)
self.close_button.clicked.connect(self.close)
def creat_layout(self):
self.main_layout = QtGui.QGridLayout()
self.main_layout.setContentsMargins(9, 9, 9, 9)
self.main_layout.setColumnStretch(0, 1)
self.setLayout(self.main_layout)
def create_repos_tree_widget(self):
self.repos_tree_widget = QtGui.QTreeWidget()
self.repos_tree_widget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.repos_tree_widget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.repos_tree_widget.setRootIsDecorated(False)
self.repos_tree_widget.setHeaderHidden(True)
self.repos_tree_widget.setObjectName('repos_tree_widget')
self.repos_tree_widget.setStyleSheet(gf.get_qtreeview_style())
self.main_layout.addWidget(self.repos_tree_widget, 2, 0, 2, 1)
def create_repo_path_line_edit(self):
self.repo_path_line_edit_layout = QtGui.QHBoxLayout()
self.repo_path_line_edit_layout.addWidget(QtGui.QLabel('Relative Watch Path:'))
self.repo_path_line_edit = QtGui.QLineEdit()
self.repo_path_line_edit_layout.addWidget(self.repo_path_line_edit)
if self.mode == 'create':
paths = tc.get_dirs_with_naming(self.sobject_item.get_search_key(), process_list=['watch_folder'])
self.repo_path_line_edit.setText(paths['versionless'][0])
elif self.mode == 'edit':
self.repo_path_line_edit.setText(self.sobject_item.get_watch_folder_path())
self.main_layout.addLayout(self.repo_path_line_edit_layout, 0, 0, 1, 2)
def create_repo_combo_box(self):
self.repo_combo_box = QtGui.QComboBox()
self.main_layout.addWidget(self.repo_combo_box, 1, 0, 1, 1)
def check_save_ability(self):
if self.repos_tree_widget.topLevelItemCount() < 1:
self.save_button.setEnabled(False)
else:
self.save_button.setEnabled(True)
def get_exclude_repo_list(self):
watch_folder_ui = env_inst.watch_folders.get(self.sobject_item.project.get_code())
watch_dict = watch_folder_ui.get_watch_dict_by_skey(self.sobject_item.get_search_key())
if watch_dict:
return watch_dict['rep']
else:
return []
def fill_repo_combo_box(self, exlude_list=None):
self.repo_combo_box.clear()
if not exlude_list:
exlude_list = []
base_dirs = env_tactic.get_all_base_dirs()
# Default repo states
for key, val in base_dirs:
if val['value'][4] and val['value'][3] not in exlude_list:
self.repo_combo_box.addItem(val['value'][1])
self.repo_combo_box.setItemData(self.repo_combo_box.count() - 1, val)
self.repo_combo_box.addItem('All Repos')
current_repo = gf.get_value_from_config(cfg_controls.get_checkin(), 'repositoryComboBox')
if current_repo:
self.repo_combo_box.setCurrentIndex(current_repo)
def fill_repo_tree_widget(self, exlude_list=None):
self.repos_tree_widget.clear()
if not exlude_list:
exlude_list = []
base_dirs = env_tactic.get_all_base_dirs()
# Default repo states
for key, val in base_dirs:
if val['value'][4] and val['value'][3] in exlude_list:
root_item = QtGui.QTreeWidgetItem()
root_item.setText(0, val['value'][1])
root_item.setData(0, QtCore.Qt.UserRole, val)
self.repos_tree_widget.addTopLevelItem(root_item)
def create_buttons(self):
self.add_new_button = QtGui.QPushButton('Add')
self.add_new_button.setMinimumWidth(90)
self.remove_button = QtGui.QPushButton('Remove')
self.remove_button.setMinimumWidth(90)
self.save_button = QtGui.QPushButton('Save and Close')
self.save_button.setMinimumWidth(90)
self.close_button = QtGui.QPushButton('Cancel')
self.close_button.setMinimumWidth(90)
self.main_layout.addWidget(self.add_new_button, 1, 1, 1, 1)
self.main_layout.addWidget(self.remove_button, 2, 1, 1, 1)
self.main_layout.addWidget(self.save_button, 4, 0, 1, 1)
self.main_layout.addWidget(self.close_button, 4, 1, 1, 1)
spacer = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.main_layout.addItem(spacer, 3, 1, 1, 1)
def add_new_repo(self):
current_repo_index = self.repo_combo_box.currentIndex()
current_repo = self.repo_combo_box.itemData(current_repo_index)
if current_repo:
self.repo_combo_box.removeItem(current_repo_index)
root_item = QtGui.QTreeWidgetItem()
root_item.setText(0, current_repo['value'][1])
root_item.setData(0, QtCore.Qt.UserRole, current_repo)
self.exclude_repo_list.append(current_repo['value'][3])
self.repos_tree_widget.addTopLevelItem(root_item)
else:
for i in range(self.repo_combo_box.count()-1):
current_repo = self.repo_combo_box.itemData(i)
root_item = QtGui.QTreeWidgetItem()
root_item.setText(0, current_repo['value'][1])
root_item.setData(0, QtCore.Qt.UserRole, current_repo)
self.exclude_repo_list.append(current_repo['value'][3])
self.repos_tree_widget.addTopLevelItem(root_item)
self.fill_repo_combo_box(self.exclude_repo_list)
self.check_save_ability()
def delete_selected_repo(self):
current_repo_item = self.repos_tree_widget.currentItem()
if current_repo_item:
current_repo = current_repo_item.data(0, QtCore.Qt.UserRole)
self.exclude_repo_list.remove(current_repo['value'][3])
self.repos_tree_widget.takeTopLevelItem(self.repos_tree_widget.currentIndex().row())
self.fill_repo_combo_box(self.exclude_repo_list)
self.check_save_ability()
def set_saved(self):
self.saved = True
def save_and_close(self):
self.set_saved()
params = (self.get_repos_list(), self.sobject_item)
self.sobject_item.set_watch_folder_path(str(self.repo_path_line_edit.text()))
if self.mode == 'create':
self.saved_signal.emit(*params)
if self.mode == 'edit':
self.edited_signal.emit(*params)
self.close()
def get_repos_list(self):
repos_list = []
for i in range(self.repos_tree_widget.topLevelItemCount()):
top_item = self.repos_tree_widget.topLevelItem(i)
repo_dict = top_item.data(0, QtCore.Qt.UserRole)
repos_list.append(repo_dict['value'][3])
return repos_list
|
def onerror(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
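# --- Hedged usage sketch (added for illustration, not part of the original handler) ---
# Shows how the handler above is meant to be wired into shutil.rmtree; the
# tempfile/stat setup here is made up for the demo, and `os`/`shutil` are
# assumed to be imported at module level as in the surrounding file.
if __name__ == "__main__":
    import stat
    import tempfile
    _root = tempfile.mkdtemp()
    _locked = os.path.join(_root, "locked.txt")
    with open(_locked, "w") as _f:
        _f.write("read-only file")
    os.chmod(_locked, stat.S_IREAD)  # on Windows this forces rmtree to call onerror
    # Note: onerror only runs when ignore_errors is left False; on Python 3.12+
    # the parameter is deprecated in favour of onexc but still accepted.
    shutil.rmtree(_root, onerror=onerror)
    assert not os.path.exists(_root)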
| 317 | 332 |
import os
import shutil
from thlib.side.Qt import QtWidgets as QtGui
from thlib.side.Qt import QtGui as Qt4Gui
from thlib.side.Qt import QtCore
from thlib.environment import env_inst, env_tactic, cfg_controls, env_read_config, env_write_config, dl
import thlib.global_functions as gf
import thlib.tactic_classes as tc
from thlib.ui.misc.ui_watch_folders import Ui_ProjectWatchFolder
class Ui_projectWatchFoldersWidget(QtGui.QDialog, Ui_ProjectWatchFolder):
def __init__(self, project, parent=None):
super(self.__class__, self).__init__(parent=parent)
self.project = project
self.watch_folders_dict = self.get_watch_folders_dict()
self.watched_items = set()
env_inst.watch_folders[self.project.get_code()] = self
self.setupUi(self)
self.create_ui()
def create_ui(self):
self.watchFoldersTreeWidget.setStyleSheet('QTreeView::item {padding: 2px;}')
self.setSizeGripEnabled(True)
self.setWindowTitle('Watched Assets for Project: {0}'.format(self.project.info.get('title')))
self.create_fs_watcher()
self.create_watch_folders_tree_context_menu()
self.controls_actions()
self.readSettings()
self.watchEnabledCheckBox.setEnabled(False)
def create_fs_watcher(self):
self.fs_watcher = gf.FSObserver()
self.fs_watcher.set_created_signal(self.handle_watch_created_event)
def create_watch_folders_tree_context_menu(self):
self.watchFoldersTreeWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.watchFoldersTreeWidget.customContextMenuRequested.connect(self.open_menu)
def watch_items_menu(self):
# TODO Make this work
enable_watch = QtGui.QAction('Enable Watch', self.watchFoldersTreeWidget)
enable_watch.setIcon(gf.get_icon('eye'))
# enable_watch.triggered.connect(self.open_file_from_tree)
disable_watch = QtGui.QAction('Disable Watch', self.watchFoldersTreeWidget)
disable_watch.setIcon(gf.get_icon('eye-slash'))
# disable_watch.triggered.connect(self.open_file_from_tree)
edit_watch = QtGui.QAction('Edit Watch', self.watchFoldersTreeWidget)
edit_watch.setIcon(gf.get_icon('edit'))
# edit_watch.triggered.connect(self.open_file_from_tree)
delete_watch = QtGui.QAction('Delete Watch', self.watchFoldersTreeWidget)
delete_watch.setIcon(gf.get_icon('remove'))
# edit_watch.triggered.connect(self.open_file_from_tree)
menu = QtGui.QMenu()
menu.addAction(enable_watch)
menu.addAction(disable_watch)
menu.addAction(edit_watch)
menu.addAction(delete_watch)
return menu
def open_menu(self):
item = self.watchFoldersTreeWidget.currentItem()
if item:
if item.data(0, QtCore.Qt.UserRole):
menu = self.watch_items_menu()
if menu:
menu.exec_(Qt4Gui.QCursor.pos())
def add_item_to_fs_watch(self, skey, path=None, recursive=True):
watch_dict = self.get_watch_dict_by_skey(skey)
if not path:
path = watch_dict['path']
paths = []
for repo in watch_dict['rep']:
abs_path = env_tactic.get_base_dir(repo)['value'][0] + '/' + path
paths.append(gf.form_path(abs_path))
self.fs_watcher.append_watch(watch_name=skey, paths=paths, repos=watch_dict['rep'], pipeline=watch_dict['asset_pipeline'], recursive=recursive)
def remove_item_from_fs_watch(self, skey):
self.fs_watcher.remove_watch(watch_name=skey)
def handle_watch_created_event(self, event, watch):
dl.log(u'File dropped to watch folder {}'.format(event.src_path), group_id='watch_folder')
self.show()
self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
QtGui.QDialog.activateWindow(self)
self.show()
self.hide()
search_key = watch.watch_name
pipeline = watch.pipeline
commit_path = gf.extract_dirname(event.src_path)
if watch.path == commit_path:
context = 'publish'
else:
context = gf.form_path(commit_path, 'linux').split('/')[-1]
description = 'From watch folder'
skey_dict = tc.split_search_key(search_key)
checkin_widget = env_inst.get_check_tree(
project_code=skey_dict['project_code'],
tab_code='checkin_out',
wdg_code=skey_dict['pipeline_code'],
)
checkin_widget.do_creating_ui()
match_template = gf.MatchTemplate(['$FILENAME.$EXT'])
files_objects_dict = match_template.get_files_objects([event.src_path])
stypes = self.project.get_stypes()
current_stype = stypes.get(skey_dict['pipeline_code'])
pipelines = current_stype.get_pipeline()
checkin_mode = None
if pipelines:
# here we do pipelines routine
current_pipeline = pipelines.get(pipeline)
if not current_pipeline:
# looks like we don't have a pipeline with the Search Type name, so we take the first one
# Also this is not quite right, because we should know the exact pipeline and its processes, so a proper pipeline_code should be written when creating the watch folder
current_pipeline = list(pipelines.values())[0]
current_process = current_pipeline.get_pipeline_process(context)
if current_process:
checkin_mode = current_process.get('checkin_mode')
else:
context = 'publish'
checkin_widget.checkin_file_objects(
search_key=search_key,
context=context,
description=description,
files_objects=files_objects_dict.get('file'),
checkin_type=checkin_mode,
keep_file_name=False
)
else:
# here we go with publish, without pipeline
checkin_widget.checkin_file_objects(
search_key=search_key,
context='publish',
description=description,
files_objects=files_objects_dict.get('file'),
checkin_type=checkin_mode,
keep_file_name=False
)
def controls_actions(self):
pass
def fill_watch_folders_tree_widget(self):
self.watchFoldersTreeWidget.clear()
if self.watch_folders_dict:
for i, asset_skey in enumerate(self.watch_folders_dict.get('assets_skeys')):
root_item = QtGui.QTreeWidgetItem()
root_item.setData(0, QtCore.Qt.UserRole, asset_skey)
root_item.setText(1, self.watch_folders_dict['assets_stypes'][i])
root_item.setText(2, self.watch_folders_dict['assets_names'][i])
repos_names = []
for repo in self.watch_folders_dict['repos'][i]:
repos_names.append(env_tactic.get_base_dir(repo)['value'][1])
root_item.setText(3, ', '.join(repos_names))
# setting actual watch status
if self.watch_folders_dict['statuses'][i]:
if self.check_for_item_in_watch(asset_skey):
root_item.setText(0, 'Watching')
self.start_watch_by_skey(asset_skey)
else:
root_item.setText(0, 'Waiting')
else:
root_item.setText(0, 'Stopped')
self.stop_watch_by_skey(asset_skey)
self.watchFoldersTreeWidget.addTopLevelItem(root_item)
self.watchFoldersTreeWidget.resizeColumnToContents(0)
self.watchFoldersTreeWidget.resizeColumnToContents(1)
self.watchFoldersTreeWidget.resizeColumnToContents(2)
self.watchFoldersTreeWidget.resizeColumnToContents(3)
if self.watched_items:
self.start_watching()
else:
self.stop_watching()
def start_watching(self):
if not self.fs_watcher.is_started():
self.fs_watcher.start()
def stop_watching(self):
if self.fs_watcher.is_started():
self.fs_watcher.stop()
def stop_watch_by_skey(self, skey):
for item in self.watched_items:
if item.get_search_key() == skey:
self.remove_item_from_fs_watch(skey)
item.watchFolderToolButton.setChecked(False)
def start_watch_by_skey(self, skey):
for item in self.watched_items:
if item.get_search_key() == skey:
self.add_item_to_fs_watch(skey, item.get_watch_folder_path(), True)
item.watchFolderToolButton.setChecked(True)
def check_for_item_in_watch(self, skey):
for item in self.watched_items:
if item.get_search_key() == skey:
if item.is_have_watch_folder():
return True
def add_item_to_watch(self, sobject_item):
# checking if watch folder exists
watch_dict = self.get_watch_dict_by_skey(sobject_item.get_search_key())
all_folders_exists = True
base_dirs = env_tactic.get_all_base_dirs()
for key, val in base_dirs:
if val['value'][4] and val['value'][3] in watch_dict['rep']:
abs_path = u'{0}/{1}'.format(val['value'][0], watch_dict['path'])
if not os.path.exists(gf.form_path(abs_path)):
all_folders_exists = False
dl.warning('Folders structure for: {0} is not created. '
'Watch will be ignored.'.format(abs_path),
group_id='watch_folders_ui')
if all_folders_exists:
self.watched_items.add(sobject_item)
self.fill_watch_folders_tree_widget()
def remove_item_from_watch(self, sobject_item):
self.watched_items.discard(sobject_item)
def add_asset_to_watch(self, sobject_item):
# in case of some bugs double checking
if not self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key()):
self.create_repo_editor_ui(sobject_item)
else:
sobject_item.check_watch_folder()
def edit_aseet_watch(self, sobject_item):
watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())
if watch_dict:
self.create_repo_editor_ui(sobject_item, mode='edit')
else:
sobject_item.check_watch_folder(True)
def delete_aseet_from_watch(self, sobject_item):
watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())
if watch_dict:
self.delete_watch_from_watch_folders_dict(sobject_item)
else:
sobject_item.check_watch_folder(True)
@gf.catch_error
def create_watch_folders(self, repos_list, sobject_item):
# creating base folders with paths
for repo in repos_list:
abs_path = env_tactic.get_base_dir(repo)['value'][0] + '/' + sobject_item.get_watch_folder_path()
# creating folder for publish
if not os.path.exists(gf.form_path(abs_path)):
os.makedirs(gf.form_path(abs_path))
# creating folders by processes
if sobject_item.get_process_list(include_hierarchy=True):
for process in sobject_item.get_process_list(include_hierarchy=True):
process_abs_path = abs_path + '/' + process
if not os.path.exists(gf.form_path(process_abs_path)):
os.makedirs(gf.form_path(process_abs_path))
@gf.catch_error
def delete_watch_folders_and_files(self, repos_list, sobject_item):
def onerror(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
# else:
# raise
for repo in repos_list:
abs_path = env_tactic.get_base_dir(repo)['value'][0] + '/' + sobject_item.get_watch_folder_path()
if os.path.exists(gf.form_path(abs_path)):
shutil.rmtree(gf.form_path(abs_path), onerror=onerror)
def add_watch_to_watch_folders_dict(self, repos_list, sobject_item):
self.watch_folders_dict['assets_names'].append(sobject_item.get_title())
self.watch_folders_dict['assets_codes'].append(sobject_item.sobject.info.get('code'))
self.watch_folders_dict['assets_stypes'].append(sobject_item.stype.get_pretty_name())
self.watch_folders_dict['assets_skeys'].append(sobject_item.sobject.get_search_key())
self.watch_folders_dict['assets_pipelines'].append(sobject_item.sobject.get_pipeline_code())
self.watch_folders_dict['paths'].append(sobject_item.get_watch_folder_path())
self.watch_folders_dict['repos'].append(repos_list)
self.watch_folders_dict['statuses'].append(True)
self.create_watch_folders(repos_list, sobject_item)
sobject_item.check_watch_folder()
self.writeSettings()
def save_watch_to_watch_folders_dict(self, repos_list, sobject_item):
watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())
if watch_dict:
idx = watch_dict['idx']
self.watch_folders_dict['assets_names'][idx] = sobject_item.get_title()
self.watch_folders_dict['assets_codes'][idx] = sobject_item.sobject.info.get('code')
self.watch_folders_dict['assets_stypes'][idx] = sobject_item.stype.get_pretty_name()
self.watch_folders_dict['assets_skeys'][idx] = sobject_item.sobject.get_search_key()
self.watch_folders_dict['assets_pipelines'][idx] = sobject_item.sobject.get_pipeline_code()
self.watch_folders_dict['paths'][idx] = sobject_item.get_watch_folder_path()
self.watch_folders_dict['repos'][idx] = repos_list
self.create_watch_folders(repos_list, sobject_item)
sobject_item.check_watch_folder()
self.writeSettings()
def edit_watch_to_watch_folders_dict(self, sobject_item, asset_name=None, asset_code=None,asset_stype=None,
asset_skey=None, asset_pipeline=None, path=None, repo=None, status=False):
watch_dict = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())
if watch_dict:
idx = watch_dict['idx']
if asset_name:
self.watch_folders_dict['assets_names'][idx] = sobject_item.get_title()
if asset_code:
self.watch_folders_dict['assets_codes'][idx] = sobject_item.sobject.info.get('code')
if asset_stype:
self.watch_folders_dict['assets_stypes'][idx] = sobject_item.stype.get_pretty_name()
if asset_skey:
self.watch_folders_dict['assets_skeys'][idx] = sobject_item.sobject.get_search_key()
if asset_pipeline:
self.watch_folders_dict['assets_pipelines'][idx] = sobject_item.sobject.get_pipeline_code()
if path:
self.watch_folders_dict['paths'][idx] = path
if repo:
self.watch_folders_dict['repos'][idx] = repo
self.watch_folders_dict['statuses'][idx] = status
sobject_item.check_watch_folder()
self.fill_watch_folders_tree_widget()
self.writeSettings()
def delete_watch_from_watch_folders_dict(self, sobject_item):
buttons = (('Remove', QtGui.QMessageBox.YesRole), ('Keep', QtGui.QMessageBox.ActionRole), ('Cancel', QtGui.QMessageBox.NoRole))
reply = gf.show_message_predefined(
'Remove Watch Folder dirs from repos?',
'Watch Folder Directories and Files can also be removed from Your Repositories.'
'<br>Remove or Keep these Dirs and Files?',
buttons=buttons,
message_type='question'
)
delete_files = False
delete_watch_folder = False
if reply == QtGui.QMessageBox.YesRole:
delete_files = True
delete_watch_folder = True
elif reply == QtGui.QMessageBox.ActionRole:
delete_files = False
delete_watch_folder = True
if delete_watch_folder:
self.stop_watch_by_skey(sobject_item.sobject.get_search_key())
idx = self.get_watch_dict_by_skey(sobject_item.sobject.get_search_key())['idx']
self.watch_folders_dict['assets_names'].pop(idx)
self.watch_folders_dict['assets_codes'].pop(idx)
self.watch_folders_dict['assets_stypes'].pop(idx)
self.watch_folders_dict['assets_skeys'].pop(idx)
self.watch_folders_dict['assets_pipelines'].pop(idx)
self.watch_folders_dict['paths'].pop(idx)
repos = self.watch_folders_dict['repos'].pop(idx)
self.watch_folders_dict['statuses'].pop(idx)
sobject_item.check_watch_folder(True)
self.writeSettings()
if delete_files:
self.delete_watch_folders_and_files(repos, sobject_item)
def create_repo_editor_ui(self, sobject_item, mode='create'):
add_watch_ui = Ui_repositoryEditorWidget(sobject_item=sobject_item, mode=mode, parent=env_inst.ui_main)
add_watch_ui.saved_signal.connect(self.add_watch_to_watch_folders_dict)
add_watch_ui.edited_signal.connect(self.save_watch_to_watch_folders_dict)
add_watch_ui.exec_()
def set_watch_folders_from_dict(self, watch_folders_dict=None):
if watch_folders_dict:
print('FILLING WATCH FOLDER')
def get_watch_dict_by_skey(self, skey):
if self.watch_folders_dict:
for i, asset_skey in enumerate(self.watch_folders_dict.get('assets_skeys')):
if skey == asset_skey:
return {
'asset_code': self.watch_folders_dict['assets_codes'][i],
'asset_name': self.watch_folders_dict['assets_names'][i],
'asset_stype': self.watch_folders_dict['assets_stypes'][i],
'asset_skey': self.watch_folders_dict['assets_skeys'][i],
'asset_pipeline': self.watch_folders_dict['assets_pipelines'][i],
'path': self.watch_folders_dict['paths'][i],
'rep': self.watch_folders_dict['repos'][i],
'status': self.watch_folders_dict['statuses'][i],
'idx': i,
}
@staticmethod
def get_watch_folders_dict():
return {
'assets_codes': [],
'assets_names': [],
'assets_stypes': [],
'assets_skeys': [],
'assets_pipelines': [],
'paths': [],
'repos': [],
'statuses': [],
}
def set_settings_from_dict(self, settings_dict=None):
ref_settings_dict = {
'watch_folders_dict': self.watch_folders_dict,
}
settings = gf.check_config(ref_settings_dict, settings_dict)
self.watch_folders_dict = settings['watch_folders_dict']
def get_settings_dict(self):
settings_dict = {
'watch_folders_dict': self.watch_folders_dict,
}
return settings_dict
def readSettings(self):
self.set_settings_from_dict(env_read_config(
filename='ui_watch_folder',
unique_id='ui_main/{0}/{1}'.format(self.project.get_type(), self.project.get_code()),
long_abs_path=True))
def writeSettings(self):
env_write_config(
self.get_settings_dict(),
filename='ui_watch_folder',
unique_id='ui_main/{0}/{1}'.format(self.project.get_type(), self.project.get_code()),
long_abs_path=True)
def showEvent(self, event):
event.accept()
self.fill_watch_folders_tree_widget()
def closeEvent(self, event):
self.writeSettings()
event.accept()
class Ui_repositoryEditorWidget(QtGui.QDialog):
saved_signal = QtCore.Signal(object, object)
edited_signal = QtCore.Signal(object, object)
def __init__(self, sobject_item, mode='create', parent=None):
super(self.__class__, self).__init__(parent=parent)
self.sobject_item = sobject_item
self.mode = mode
self.saved = False
self.exclude_repo_list = self.get_exclude_repo_list()
self.create_ui()
def create_ui(self):
if self.mode == 'create':
self.setWindowTitle('Choose Repositories to Watch')
else:
self.setWindowTitle('Editing Watch Folders')
self.resize(600, 420)
self.setSizeGripEnabled(True)
self.creat_layout()
self.create_repo_path_line_edit()
self.create_repo_combo_box()
self.create_repos_tree_widget()
self.create_buttons()
if self.mode == 'edit':
self.fill_repo_combo_box(self.exclude_repo_list)
self.fill_repo_tree_widget(self.exclude_repo_list)
else:
self.fill_repo_combo_box()
self.fill_repo_tree_widget()
self.check_save_ability()
self.controls_actions()
def controls_actions(self):
self.add_new_button.clicked.connect(self.add_new_repo)
self.remove_button.clicked.connect(self.delete_selected_repo)
self.save_button.clicked.connect(self.save_and_close)
self.close_button.clicked.connect(self.close)
def creat_layout(self):
self.main_layout = QtGui.QGridLayout()
self.main_layout.setContentsMargins(9, 9, 9, 9)
self.main_layout.setColumnStretch(0, 1)
self.setLayout(self.main_layout)
def create_repos_tree_widget(self):
self.repos_tree_widget = QtGui.QTreeWidget()
self.repos_tree_widget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.repos_tree_widget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.repos_tree_widget.setRootIsDecorated(False)
self.repos_tree_widget.setHeaderHidden(True)
self.repos_tree_widget.setObjectName('repos_tree_widget')
self.repos_tree_widget.setStyleSheet(gf.get_qtreeview_style())
self.main_layout.addWidget(self.repos_tree_widget, 2, 0, 2, 1)
def create_repo_path_line_edit(self):
self.repo_path_line_edit_layout = QtGui.QHBoxLayout()
self.repo_path_line_edit_layout.addWidget(QtGui.QLabel('Relative Watch Path:'))
self.repo_path_line_edit = QtGui.QLineEdit()
self.repo_path_line_edit_layout.addWidget(self.repo_path_line_edit)
if self.mode == 'create':
paths = tc.get_dirs_with_naming(self.sobject_item.get_search_key(), process_list=['watch_folder'])
self.repo_path_line_edit.setText(paths['versionless'][0])
elif self.mode == 'edit':
self.repo_path_line_edit.setText(self.sobject_item.get_watch_folder_path())
self.main_layout.addLayout(self.repo_path_line_edit_layout, 0, 0, 1, 2)
def create_repo_combo_box(self):
self.repo_combo_box = QtGui.QComboBox()
self.main_layout.addWidget(self.repo_combo_box, 1, 0, 1, 1)
def check_save_ability(self):
if self.repos_tree_widget.topLevelItemCount() < 1:
self.save_button.setEnabled(False)
else:
self.save_button.setEnabled(True)
def get_exclude_repo_list(self):
watch_folder_ui = env_inst.watch_folders.get(self.sobject_item.project.get_code())
watch_dict = watch_folder_ui.get_watch_dict_by_skey(self.sobject_item.get_search_key())
if watch_dict:
return watch_dict['rep']
else:
return []
def fill_repo_combo_box(self, exlude_list=None):
self.repo_combo_box.clear()
if not exlude_list:
exlude_list = []
base_dirs = env_tactic.get_all_base_dirs()
# Default repo states
for key, val in base_dirs:
if val['value'][4] and val['value'][3] not in exlude_list:
self.repo_combo_box.addItem(val['value'][1])
self.repo_combo_box.setItemData(self.repo_combo_box.count() - 1, val)
self.repo_combo_box.addItem('All Repos')
current_repo = gf.get_value_from_config(cfg_controls.get_checkin(), 'repositoryComboBox')
if current_repo:
self.repo_combo_box.setCurrentIndex(current_repo)
def fill_repo_tree_widget(self, exlude_list=None):
self.repos_tree_widget.clear()
if not exlude_list:
exlude_list = []
base_dirs = env_tactic.get_all_base_dirs()
# Default repo states
for key, val in base_dirs:
if val['value'][4] and val['value'][3] in exlude_list:
root_item = QtGui.QTreeWidgetItem()
root_item.setText(0, val['value'][1])
root_item.setData(0, QtCore.Qt.UserRole, val)
self.repos_tree_widget.addTopLevelItem(root_item)
def create_buttons(self):
self.add_new_button = QtGui.QPushButton('Add')
self.add_new_button.setMinimumWidth(90)
self.remove_button = QtGui.QPushButton('Remove')
self.remove_button.setMinimumWidth(90)
self.save_button = QtGui.QPushButton('Save and Close')
self.save_button.setMinimumWidth(90)
self.close_button = QtGui.QPushButton('Cancel')
self.close_button.setMinimumWidth(90)
self.main_layout.addWidget(self.add_new_button, 1, 1, 1, 1)
self.main_layout.addWidget(self.remove_button, 2, 1, 1, 1)
self.main_layout.addWidget(self.save_button, 4, 0, 1, 1)
self.main_layout.addWidget(self.close_button, 4, 1, 1, 1)
spacer = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.main_layout.addItem(spacer, 3, 1, 1, 1)
def add_new_repo(self):
current_repo_index = self.repo_combo_box.currentIndex()
current_repo = self.repo_combo_box.itemData(current_repo_index)
if current_repo:
self.repo_combo_box.removeItem(current_repo_index)
root_item = QtGui.QTreeWidgetItem()
root_item.setText(0, current_repo['value'][1])
root_item.setData(0, QtCore.Qt.UserRole, current_repo)
self.exclude_repo_list.append(current_repo['value'][3])
self.repos_tree_widget.addTopLevelItem(root_item)
else:
for i in range(self.repo_combo_box.count()-1):
current_repo = self.repo_combo_box.itemData(i)
root_item = QtGui.QTreeWidgetItem()
root_item.setText(0, current_repo['value'][1])
root_item.setData(0, QtCore.Qt.UserRole, current_repo)
self.exclude_repo_list.append(current_repo['value'][3])
self.repos_tree_widget.addTopLevelItem(root_item)
self.fill_repo_combo_box(self.exclude_repo_list)
self.check_save_ability()
def delete_selected_repo(self):
current_repo_item = self.repos_tree_widget.currentItem()
if current_repo_item:
current_repo = current_repo_item.data(0, QtCore.Qt.UserRole)
self.exclude_repo_list.remove(current_repo['value'][3])
self.repos_tree_widget.takeTopLevelItem(self.repos_tree_widget.currentIndex().row())
self.fill_repo_combo_box(self.exclude_repo_list)
self.check_save_ability()
def set_saved(self):
self.saved = True
def save_and_close(self):
self.set_saved()
params = (self.get_repos_list(), self.sobject_item)
self.sobject_item.set_watch_folder_path(str(self.repo_path_line_edit.text()))
if self.mode == 'create':
self.saved_signal.emit(*params)
if self.mode == 'edit':
self.edited_signal.emit(*params)
self.close()
def get_repos_list(self):
repos_list = []
for i in range(self.repos_tree_widget.topLevelItemCount()):
top_item = self.repos_tree_widget.topLevelItem(i)
repo_dict = top_item.data(0, QtCore.Qt.UserRole)
repos_list.append(repo_dict['value'][3])
return repos_list
|
pressure
|
Calculate pressure at specified loading.
For the TemkinApprox model, the pressure will
be computed numerically as no analytical inversion is possible.
Parameters
----------
loading : float
The loading at which to calculate the pressure.
Returns
-------
float
Pressure at specified loading.
|
"""Temkin Approximation isotherm model."""
import numpy
import scipy
from ..utilities.exceptions import CalculationError
from .base_model import IsothermBaseModel
class TemkinApprox(IsothermBaseModel):
r"""
Asymptotic approximation to the Temkin isotherm.
.. math::
n(p) = n_m \frac{K p}{1 + K p} + n_m \theta (\frac{K p}{1 + K p})^2 (\frac{K p}{1 + K p} -1)
Notes
-----
The Temkin adsorption isotherm [#]_, like the Langmuir model, considers
a surface with n_m identical adsorption sites, but takes into account adsorbate-
adsorbate interactions by assuming that the enthalpy of adsorption is a linear
function of the coverage. The Temkin isotherm is derived [#]_ using a
mean-field argument, and an asymptotic approximation is used
to obtain an explicit equation for the loading.
Here, :math:`n_m` and K have the same physical meaning as in the Langmuir model.
The additional parameter :math:`\theta` describes the strength of the adsorbate-adsorbate
interactions (:math:`\theta < 0` for attractions).
References
----------
.. [#] M. I. Temkin, V. Pyzhev, Kinetics of ammonia synthesis on promoted iron
catalyst, Acta Phys. Chim. USSR 12 (1940) 327–356.
.. [#] Phys. Chem. Chem. Phys., 2014, 16, 5499-5513
"""
# Model parameters
name = 'TemkinApprox'
calculates = 'loading'
param_names = ["n_m", "K", "tht"]
param_bounds = {
"n_m": [0, numpy.inf],
"K": [0, numpy.inf],
"tht": [0, numpy.inf],
}
def __init__(self):
"""Instantiation function."""
self.params = {"n_m": numpy.nan, "K": numpy.nan, "tht": numpy.nan}
def loading(self, pressure):
"""
Calculate loading at specified pressure.
Parameters
----------
pressure : float
The pressure at which to calculate the loading.
Returns
-------
float
Loading at specified pressure.
"""
lang_load = self.params["K"] * pressure / (1.0 + self.params["K"] * pressure)
return self.params["n_m"] * (lang_load + self.params["tht"] * lang_load ** 2 *
(lang_load - 1))
# MASKED: pressure function (lines 72-99)
def spreading_pressure(self, pressure):
r"""
Calculate spreading pressure at specified gas pressure.
Function that calculates spreading pressure by solving the
following integral at each point i.
.. math::
\pi = \int_{0}^{p_i} \frac{n_i(p_i)}{p_i} dp_i
The integral for the TemkinApprox model is solved analytically.
.. math::
\pi = n_m \Big( \ln{(1 + K p)} + \frac{\theta (2 K p + 1)}{2(1 + K p)^2}\Big)
Parameters
----------
pressure : float
The pressure at which to calculate the spreading pressure.
Returns
-------
float
Spreading pressure at specified pressure.
"""
one_plus_kp = 1.0 + self.params["K"] * pressure
return self.params["n_m"] * (numpy.log(one_plus_kp) +
self.params["tht"] * (2.0 * self.params["K"] * pressure + 1.0) /
(2.0 * one_plus_kp ** 2))
def initial_guess(self, pressure, loading):
"""
Return initial guess for fitting.
Parameters
----------
pressure : ndarray
Pressure data.
loading : ndarray
Loading data.
Returns
-------
dict
Dictionary of initial guesses for the parameters.
"""
saturation_loading, langmuir_k = super().initial_guess(pressure, loading)
guess = {"n_m": saturation_loading, "K": langmuir_k, "tht": 0.0}
for param in guess:
if guess[param] < self.param_bounds[param][0]:
guess[param] = self.param_bounds[param][0]
if guess[param] > self.param_bounds[param][1]:
guess[param] = self.param_bounds[param][1]
return guess
|
def pressure(self, loading):
"""
Calculate pressure at specified loading.
For the TemkinApprox model, the pressure will
be computed numerically as no analytical inversion is possible.
Parameters
----------
loading : float
The loading at which to calculate the pressure.
Returns
-------
float
Pressure at specified loading.
"""
def fun(x):
return self.loading(x) - loading
opt_res = scipy.optimize.root(fun, 0, method='hybr')
if not opt_res.success:
raise CalculationError("""
Root finding for value {0} failed.
""".format(loading))
return opt_res.x
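The docstring above notes that no analytical inversion exists, so pressure() falls back on scipy's root finder. A minimal round-trip sketch, assuming the TemkinApprox class defined above is in scope and using made-up parameter values:

import scipy.optimize  # make sure the optimize submodule is loaded for pressure()

model = TemkinApprox()
model.params = {"n_m": 4.0, "K": 2.0, "tht": 0.1}

p = model.pressure(2.5)      # numerical inversion via scipy.optimize.root
print(p, model.loading(p))   # loading(p) should recover a value close to 2.5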
| 72 | 99 |
"""Temkin Approximation isotherm model."""
import numpy
import scipy
from ..utilities.exceptions import CalculationError
from .base_model import IsothermBaseModel
class TemkinApprox(IsothermBaseModel):
r"""
Asymptotic approximation to the Temkin isotherm.
.. math::
n(p) = n_m \frac{K p}{1 + K p} + n_m \theta (\frac{K p}{1 + K p})^2 (\frac{K p}{1 + K p} -1)
Notes
-----
The Temkin adsorption isotherm [#]_, like the Langmuir model, considers
a surface with n_m identical adsorption sites, but takes into account adsorbate-
adsorbate interactions by assuming that the enthalpy of adsorption is a linear
function of the coverage. The Temkin isotherm is derived [#]_ using a
    mean-field argument, and an asymptotic approximation is used
to obtain an explicit equation for the loading.
Here, :math:`n_m` and K have the same physical meaning as in the Langmuir model.
The additional parameter :math:`\theta` describes the strength of the adsorbate-adsorbate
interactions (:math:`\theta < 0` for attractions).
References
----------
    .. [#] M.I. Tempkin, V. Pyzhev, Kinetics of ammonia synthesis on promoted iron
catalyst, Acta Phys. Chim. USSR 12 (1940) 327–356.
.. [#] Phys. Chem. Chem. Phys., 2014,16, 5499-5513
"""
# Model parameters
name = 'TemkinApprox'
calculates = 'loading'
param_names = ["n_m", "K", "tht"]
param_bounds = {
"n_m": [0, numpy.inf],
"K": [0, numpy.inf],
"tht": [0, numpy.inf],
}
def __init__(self):
"""Instantiation function."""
self.params = {"n_m": numpy.nan, "K": numpy.nan, "tht": numpy.nan}
def loading(self, pressure):
"""
Calculate loading at specified pressure.
Parameters
----------
pressure : float
The pressure at which to calculate the loading.
Returns
-------
float
Loading at specified pressure.
"""
lang_load = self.params["K"] * pressure / (1.0 + self.params["K"] * pressure)
return self.params["n_m"] * (lang_load + self.params["tht"] * lang_load ** 2 *
(lang_load - 1))
def pressure(self, loading):
"""
Calculate pressure at specified loading.
For the TemkinApprox model, the pressure will
be computed numerically as no analytical inversion is possible.
Parameters
----------
loading : float
The loading at which to calculate the pressure.
Returns
-------
float
Pressure at specified loading.
"""
def fun(x):
return self.loading(x) - loading
opt_res = scipy.optimize.root(fun, 0, method='hybr')
if not opt_res.success:
raise CalculationError("""
Root finding for value {0} failed.
""".format(loading))
return opt_res.x
def spreading_pressure(self, pressure):
r"""
Calculate spreading pressure at specified gas pressure.
Function that calculates spreading pressure by solving the
following integral at each point i.
.. math::
\pi = \int_{0}^{p_i} \frac{n_i(p_i)}{p_i} dp_i
The integral for the TemkinApprox model is solved analytically.
.. math::
\pi = n_m \Big( \ln{(1 + K p)} + \frac{\theta (2 K p + 1)}{2(1 + K p)^2}\Big)
Parameters
----------
pressure : float
The pressure at which to calculate the spreading pressure.
Returns
-------
float
Spreading pressure at specified pressure.
"""
one_plus_kp = 1.0 + self.params["K"] * pressure
return self.params["n_m"] * (numpy.log(one_plus_kp) +
self.params["tht"] * (2.0 * self.params["K"] * pressure + 1.0) /
(2.0 * one_plus_kp ** 2))
def initial_guess(self, pressure, loading):
"""
Return initial guess for fitting.
Parameters
----------
pressure : ndarray
Pressure data.
loading : ndarray
Loading data.
Returns
-------
dict
Dictionary of initial guesses for the parameters.
"""
saturation_loading, langmuir_k = super().initial_guess(pressure, loading)
guess = {"n_m": saturation_loading, "K": langmuir_k, "tht": 0.0}
for param in guess:
if guess[param] < self.param_bounds[param][0]:
guess[param] = self.param_bounds[param][0]
if guess[param] > self.param_bounds[param][1]:
guess[param] = self.param_bounds[param][1]
return guess
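As a quick, hedged sanity check on the analytic spreading-pressure expression above: its derivative with respect to pressure should equal the integrand n(p)/p. The parameter values below are made up, and TemkinApprox is assumed to be in scope:

model = TemkinApprox()
model.params = {"n_m": 4.0, "K": 2.0, "tht": 0.1}

p, h = 1.5, 1e-6
dpi_dp = (model.spreading_pressure(p + h) - model.spreading_pressure(p - h)) / (2 * h)
print(dpi_dp, model.loading(p) / p)   # the two numbers should agree closely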
|
initial_guess
|
Return initial guess for fitting.
Parameters
----------
pressure : ndarray
Pressure data.
loading : ndarray
Loading data.
Returns
-------
dict
Dictionary of initial guesses for the parameters.
|
"""Temkin Approximation isotherm model."""
import numpy
import scipy
from ..utilities.exceptions import CalculationError
from .base_model import IsothermBaseModel
class TemkinApprox(IsothermBaseModel):
r"""
Asymptotic approximation to the Temkin isotherm.
.. math::
n(p) = n_m \frac{K p}{1 + K p} + n_m \theta (\frac{K p}{1 + K p})^2 (\frac{K p}{1 + K p} -1)
Notes
-----
The Temkin adsorption isotherm [#]_, like the Langmuir model, considers
a surface with n_m identical adsorption sites, but takes into account adsorbate-
adsorbate interactions by assuming that the enthalpy of adsorption is a linear
function of the coverage. The Temkin isotherm is derived [#]_ using a
    mean-field argument, and an asymptotic approximation is used
to obtain an explicit equation for the loading.
Here, :math:`n_m` and K have the same physical meaning as in the Langmuir model.
The additional parameter :math:`\theta` describes the strength of the adsorbate-adsorbate
interactions (:math:`\theta < 0` for attractions).
References
----------
    .. [#] M.I. Tempkin, V. Pyzhev, Kinetics of ammonia synthesis on promoted iron
catalyst, Acta Phys. Chim. USSR 12 (1940) 327–356.
.. [#] Phys. Chem. Chem. Phys., 2014,16, 5499-5513
"""
# Model parameters
name = 'TemkinApprox'
calculates = 'loading'
param_names = ["n_m", "K", "tht"]
param_bounds = {
"n_m": [0, numpy.inf],
"K": [0, numpy.inf],
"tht": [0, numpy.inf],
}
def __init__(self):
"""Instantiation function."""
self.params = {"n_m": numpy.nan, "K": numpy.nan, "tht": numpy.nan}
def loading(self, pressure):
"""
Calculate loading at specified pressure.
Parameters
----------
pressure : float
The pressure at which to calculate the loading.
Returns
-------
float
Loading at specified pressure.
"""
lang_load = self.params["K"] * pressure / (1.0 + self.params["K"] * pressure)
return self.params["n_m"] * (lang_load + self.params["tht"] * lang_load ** 2 *
(lang_load - 1))
def pressure(self, loading):
"""
Calculate pressure at specified loading.
For the TemkinApprox model, the pressure will
be computed numerically as no analytical inversion is possible.
Parameters
----------
loading : float
The loading at which to calculate the pressure.
Returns
-------
float
Pressure at specified loading.
"""
def fun(x):
return self.loading(x) - loading
opt_res = scipy.optimize.root(fun, 0, method='hybr')
if not opt_res.success:
raise CalculationError("""
Root finding for value {0} failed.
""".format(loading))
return opt_res.x
def spreading_pressure(self, pressure):
r"""
Calculate spreading pressure at specified gas pressure.
Function that calculates spreading pressure by solving the
following integral at each point i.
.. math::
\pi = \int_{0}^{p_i} \frac{n_i(p_i)}{p_i} dp_i
The integral for the TemkinApprox model is solved analytically.
.. math::
\pi = n_m \Big( \ln{(1 + K p)} + \frac{\theta (2 K p + 1)}{2(1 + K p)^2}\Big)
Parameters
----------
pressure : float
The pressure at which to calculate the spreading pressure.
Returns
-------
float
Spreading pressure at specified pressure.
"""
one_plus_kp = 1.0 + self.params["K"] * pressure
return self.params["n_m"] * (numpy.log(one_plus_kp) +
self.params["tht"] * (2.0 * self.params["K"] * pressure + 1.0) /
(2.0 * one_plus_kp ** 2))
# MASKED: initial_guess function (lines 133-159)
|
def initial_guess(self, pressure, loading):
"""
Return initial guess for fitting.
Parameters
----------
pressure : ndarray
Pressure data.
loading : ndarray
Loading data.
Returns
-------
dict
Dictionary of initial guesses for the parameters.
"""
saturation_loading, langmuir_k = super().initial_guess(pressure, loading)
guess = {"n_m": saturation_loading, "K": langmuir_k, "tht": 0.0}
for param in guess:
if guess[param] < self.param_bounds[param][0]:
guess[param] = self.param_bounds[param][0]
if guess[param] > self.param_bounds[param][1]:
guess[param] = self.param_bounds[param][1]
return guess
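initial_guess() only seeds a fit; the fitting routine itself lives in the base class and is not shown here. Purely as an illustration of how such a seed could be used, the following hedged sketch fits the Temkin-approximation equation to synthetic data with scipy (the data, guess and bounds are all made up):

import numpy
from scipy.optimize import least_squares

def temkin_loading(p, n_m, K, tht):
    x = K * p / (1.0 + K * p)
    return n_m * (x + tht * x ** 2 * (x - 1))

true_params = (4.0, 2.0, 0.1)                # illustrative "true" parameters
pressure = numpy.linspace(0.01, 10, 50)
loading = temkin_loading(pressure, *true_params)

guess = [loading.max(), 1.0, 0.01]           # crude n_m, K, tht starting point
result = least_squares(
    lambda q: temkin_loading(pressure, *q) - loading,
    guess,
    bounds=([0, 0, 0], [numpy.inf] * 3),
)
print(result.x)                              # should land close to (4.0, 2.0, 0.1)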
| 133 | 159 |
"""Temkin Approximation isotherm model."""
import numpy
import scipy
from ..utilities.exceptions import CalculationError
from .base_model import IsothermBaseModel
class TemkinApprox(IsothermBaseModel):
r"""
Asymptotic approximation to the Temkin isotherm.
.. math::
n(p) = n_m \frac{K p}{1 + K p} + n_m \theta (\frac{K p}{1 + K p})^2 (\frac{K p}{1 + K p} -1)
Notes
-----
The Temkin adsorption isotherm [#]_, like the Langmuir model, considers
a surface with n_m identical adsorption sites, but takes into account adsorbate-
adsorbate interactions by assuming that the enthalpy of adsorption is a linear
function of the coverage. The Temkin isotherm is derived [#]_ using a
    mean-field argument, and an asymptotic approximation is used
to obtain an explicit equation for the loading.
Here, :math:`n_m` and K have the same physical meaning as in the Langmuir model.
The additional parameter :math:`\theta` describes the strength of the adsorbate-adsorbate
interactions (:math:`\theta < 0` for attractions).
References
----------
    .. [#] M.I. Tempkin, V. Pyzhev, Kinetics of ammonia synthesis on promoted iron
catalyst, Acta Phys. Chim. USSR 12 (1940) 327–356.
.. [#] Phys. Chem. Chem. Phys., 2014,16, 5499-5513
"""
# Model parameters
name = 'TemkinApprox'
calculates = 'loading'
param_names = ["n_m", "K", "tht"]
param_bounds = {
"n_m": [0, numpy.inf],
"K": [0, numpy.inf],
"tht": [0, numpy.inf],
}
def __init__(self):
"""Instantiation function."""
self.params = {"n_m": numpy.nan, "K": numpy.nan, "tht": numpy.nan}
def loading(self, pressure):
"""
Calculate loading at specified pressure.
Parameters
----------
pressure : float
The pressure at which to calculate the loading.
Returns
-------
float
Loading at specified pressure.
"""
lang_load = self.params["K"] * pressure / (1.0 + self.params["K"] * pressure)
return self.params["n_m"] * (lang_load + self.params["tht"] * lang_load ** 2 *
(lang_load - 1))
def pressure(self, loading):
"""
Calculate pressure at specified loading.
For the TemkinApprox model, the pressure will
be computed numerically as no analytical inversion is possible.
Parameters
----------
loading : float
The loading at which to calculate the pressure.
Returns
-------
float
Pressure at specified loading.
"""
def fun(x):
return self.loading(x) - loading
opt_res = scipy.optimize.root(fun, 0, method='hybr')
if not opt_res.success:
raise CalculationError("""
Root finding for value {0} failed.
""".format(loading))
return opt_res.x
def spreading_pressure(self, pressure):
r"""
Calculate spreading pressure at specified gas pressure.
Function that calculates spreading pressure by solving the
following integral at each point i.
.. math::
\pi = \int_{0}^{p_i} \frac{n_i(p_i)}{p_i} dp_i
The integral for the TemkinApprox model is solved analytically.
.. math::
\pi = n_m \Big( \ln{(1 + K p)} + \frac{\theta (2 K p + 1)}{2(1 + K p)^2}\Big)
Parameters
----------
pressure : float
The pressure at which to calculate the spreading pressure.
Returns
-------
float
Spreading pressure at specified pressure.
"""
one_plus_kp = 1.0 + self.params["K"] * pressure
return self.params["n_m"] * (numpy.log(one_plus_kp) +
self.params["tht"] * (2.0 * self.params["K"] * pressure + 1.0) /
(2.0 * one_plus_kp ** 2))
def initial_guess(self, pressure, loading):
"""
Return initial guess for fitting.
Parameters
----------
pressure : ndarray
Pressure data.
loading : ndarray
Loading data.
Returns
-------
dict
Dictionary of initial guesses for the parameters.
"""
saturation_loading, langmuir_k = super().initial_guess(pressure, loading)
guess = {"n_m": saturation_loading, "K": langmuir_k, "tht": 0.0}
for param in guess:
if guess[param] < self.param_bounds[param][0]:
guess[param] = self.param_bounds[param][0]
if guess[param] > self.param_bounds[param][1]:
guess[param] = self.param_bounds[param][1]
return guess
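For a concrete feel of the equation in the class docstring, here is a small worked example; all numbers are illustrative only:

n_m, K, tht, p = 4.0, 2.0, 0.1, 0.5
x = K * p / (1 + K * p)                    # Langmuir fraction K*p/(1 + K*p) = 0.5
langmuir_part = n_m * x                    # 4.0 * 0.5 = 2.0
correction = n_m * tht * x ** 2 * (x - 1)  # 4.0 * 0.1 * 0.25 * (-0.5) = -0.05
print(langmuir_part + correction)          # 1.95, i.e. what loading(0.5) returns for these parameters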
|
collect_potential_dependencies
|
Collect all potential dependencies of a job. These might contain
ambiguities. The keys of the returned dict represent the files to be considered.
|
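The collect_potential_dependencies method itself is masked in the source below. Purely as a hedged illustration (not the Snakemake implementation), mapping each input file of a job to its candidate producer jobs could look roughly like this, leaning on the file2jobs helper and MissingRuleException that appear further down in the file:

from collections import defaultdict
from snakemake.exceptions import MissingRuleException

def sketch_collect_potential_dependencies(dag, job):
    # Hypothetical helper, for illustration only: for every input file of the
    # job, ask the DAG which jobs could produce it. Files that no rule can
    # produce map to an empty list, mirroring the "no producing job found"
    # branch handled in update_() below.
    potential = defaultdict(list)
    for file in job.input:
        try:
            producers = dag.file2jobs(file)
        except MissingRuleException:
            producers = []
        potential[file].extend(producers)
    return potential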
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2015-2019, Johannes Köster"
__email__ = "[email protected]"
__license__ = "MIT"
import html
import os
import shutil
import textwrap
import time
import tarfile
from collections import defaultdict, Counter
from itertools import chain, filterfalse, groupby
from functools import partial
from pathlib import Path
import uuid
import math
from snakemake.io import PeriodicityDetector, wait_for_files, is_flagged
from snakemake.jobs import Reason, JobFactory, GroupJobFactory, Job
from snakemake.exceptions import MissingInputException
from snakemake.exceptions import MissingRuleException, AmbiguousRuleException
from snakemake.exceptions import CyclicGraphException, MissingOutputException
from snakemake.exceptions import IncompleteFilesException, ImproperOutputException
from snakemake.exceptions import PeriodicWildcardError
from snakemake.exceptions import RemoteFileException, WorkflowError, ChildIOException
from snakemake.exceptions import InputFunctionException
from snakemake.logging import logger
from snakemake.common import DYNAMIC_FILL, group_into_chunks
from snakemake.deployment import conda, singularity
from snakemake.output_index import OutputIndex
from snakemake import workflow
class Batch:
"""Definition of a batch for calculating only a partial DAG."""
def __init__(self, rulename: str, idx: int, batches: int):
assert idx <= batches
assert idx > 0
self.rulename = rulename
self.idx = idx
self.batches = batches
def get_batch(self, items: list):
"""Return the defined batch of the given items.
Items are usually input files."""
# make sure that we always consider items in the same order
if len(items) < self.batches:
            raise WorkflowError(
                "Batching rule {} has fewer input files than batches. "
"Please choose a smaller number of batches.".format(self.rulename)
)
items = sorted(items)
batch_len = math.floor(len(items) / self.batches)
# self.batch is one-based, hence we have to subtract 1
idx = self.idx - 1
i = idx * batch_len
if self.is_final:
# extend the last batch to cover rest of list
return items[i:]
else:
return items[i : i + batch_len]
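    # Illustrative example (not part of the original source): with 10 sorted
    # items and batches=3, batch_len = floor(10 / 3) = 3, so batch 1 yields
    # items[0:3], batch 2 yields items[3:6], and the final batch 3 yields
    # items[6:]; the remainder always ends up in the last batch.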
@property
def is_final(self):
return self.idx == self.batches
def __str__(self):
return "{}/{} (rule {})".format(self.idx, self.batches, self.rulename)
class DAG:
"""Directed acyclic graph of jobs."""
def __init__(
self,
workflow,
rules=None,
dryrun=False,
targetfiles=None,
targetrules=None,
forceall=False,
forcerules=None,
forcefiles=None,
priorityfiles=None,
priorityrules=None,
untilfiles=None,
untilrules=None,
omitfiles=None,
omitrules=None,
ignore_ambiguity=False,
force_incomplete=False,
ignore_incomplete=False,
notemp=False,
keep_remote_local=False,
batch=None,
):
self.dryrun = dryrun
self.dependencies = defaultdict(partial(defaultdict, set))
self.depending = defaultdict(partial(defaultdict, set))
self._needrun = set()
self._priority = dict()
self._reason = defaultdict(Reason)
self._finished = set()
self._dynamic = set()
self._len = 0
self.workflow = workflow
self.rules = set(rules)
self.ignore_ambiguity = ignore_ambiguity
self.targetfiles = targetfiles
self.targetrules = targetrules
self.priorityfiles = priorityfiles
self.priorityrules = priorityrules
self.targetjobs = set()
self.prioritytargetjobs = set()
self._ready_jobs = set()
self.notemp = notemp
self.keep_remote_local = keep_remote_local
self._jobid = dict()
self.job_cache = dict()
self.conda_envs = dict()
self.container_imgs = dict()
self._progress = 0
self._group = dict()
self.job_factory = JobFactory()
self.group_job_factory = GroupJobFactory()
self.forcerules = set()
self.forcefiles = set()
self.untilrules = set()
self.untilfiles = set()
self.omitrules = set()
self.omitfiles = set()
self.updated_subworkflow_files = set()
if forceall:
self.forcerules.update(self.rules)
elif forcerules:
self.forcerules.update(forcerules)
if forcefiles:
self.forcefiles.update(forcefiles)
if untilrules:
self.untilrules.update(set(rule.name for rule in untilrules))
if untilfiles:
self.untilfiles.update(untilfiles)
if omitrules:
self.omitrules.update(set(rule.name for rule in omitrules))
if omitfiles:
self.omitfiles.update(omitfiles)
self.has_dynamic_rules = any(rule.dynamic_output for rule in self.rules)
self.omitforce = set()
self.batch = batch
if batch is not None and not batch.is_final:
# Since not all input files of a batching rule are considered, we cannot run
# beyond that rule.
# For the final batch, we do not need to omit anything.
self.omitrules.add(batch.rulename)
self.force_incomplete = force_incomplete
self.ignore_incomplete = ignore_incomplete
self.periodic_wildcard_detector = PeriodicityDetector()
self.update_output_index()
def init(self, progress=False):
""" Initialise the DAG. """
for job in map(self.rule2job, self.targetrules):
job = self.update([job], progress=progress)
self.targetjobs.add(job)
for file in self.targetfiles:
job = self.update(self.file2jobs(file), file=file, progress=progress)
self.targetjobs.add(job)
self.cleanup()
self.update_needrun()
self.set_until_jobs()
self.delete_omitfrom_jobs()
self.update_jobids()
self.check_directory_outputs()
# check if remaining jobs are valid
for i, job in enumerate(self.jobs):
job.is_valid()
def check_directory_outputs(self):
"""Check that no output file is contained in a directory output of the same or another rule."""
outputs = sorted(
{
(path(f), job)
for job in self.jobs
for f in job.output
for path in (os.path.abspath, os.path.realpath)
}
)
for i in range(len(outputs) - 1):
(a, job_a), (b, job_b) = outputs[i : i + 2]
try:
common = os.path.commonpath([a, b])
except ValueError:
# commonpath raises error if windows drives are different.
continue
if a != b and common == os.path.commonpath([a]) and job_a != job_b:
raise ChildIOException(parent=outputs[i], child=outputs[i + 1])
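    # Note added for clarity (not in the original source): os.path.commonpath
    # of a parent/child pair equals the parent itself, e.g.
    # commonpath(["/out/dir", "/out/dir/file.txt"]) == "/out/dir", which is the
    # condition the loop above uses to detect an output nested inside a
    # directory output of another job.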
@property
def checkpoint_jobs(self):
for job in self.needrun_jobs:
if job.is_checkpoint:
yield job
def update_checkpoint_outputs(self):
workflow.checkpoints.future_output = set(
f for job in self.checkpoint_jobs for f in job.output
)
def update_jobids(self):
for job in self.jobs:
if job not in self._jobid:
self._jobid[job] = len(self._jobid)
def cleanup_workdir(self):
for io_dir in set(
os.path.dirname(io_file)
for job in self.jobs
for io_file in chain(job.output, job.input)
if not os.path.exists(io_file)
):
if os.path.exists(io_dir) and not len(os.listdir(io_dir)):
os.removedirs(io_dir)
def cleanup(self):
self.job_cache.clear()
final_jobs = set(self.jobs)
todelete = [job for job in self.dependencies if job not in final_jobs]
for job in todelete:
del self.dependencies[job]
try:
del self.depending[job]
except KeyError:
pass
def create_conda_envs(
self, dryrun=False, forceall=False, init_only=False, quiet=False
):
# First deduplicate based on job.conda_env_file
jobs = self.jobs if forceall else self.needrun_jobs
env_set = {
(job.conda_env_file, job.container_img_url)
for job in jobs
if job.conda_env_file
}
# Then based on md5sum values
self.conda_envs = dict()
for (env_file, simg_url) in env_set:
simg = None
if simg_url and self.workflow.use_singularity:
assert (
simg_url in self.container_imgs
), "bug: must first pull singularity images"
simg = self.container_imgs[simg_url]
env = conda.Env(
env_file,
self,
container_img=simg,
cleanup=self.workflow.conda_cleanup_pkgs,
)
self.conda_envs[(env_file, simg_url)] = env
if not init_only:
for env in self.conda_envs.values():
if not dryrun or not quiet:
env.create(dryrun)
def pull_container_imgs(self, dryrun=False, forceall=False, quiet=False):
# First deduplicate based on job.conda_env_file
jobs = self.jobs if forceall else self.needrun_jobs
img_set = {job.container_img_url for job in jobs if job.container_img_url}
for img_url in img_set:
img = singularity.Image(img_url, self)
if not dryrun or not quiet:
img.pull(dryrun)
self.container_imgs[img_url] = img
def update_output_index(self):
"""Update the OutputIndex."""
self.output_index = OutputIndex(self.rules)
def check_incomplete(self):
"""Check if any output files are incomplete. This is done by looking up
markers in the persistence module."""
if not self.ignore_incomplete:
incomplete = self.incomplete_files
if incomplete:
if self.force_incomplete:
logger.debug("Forcing incomplete files:")
logger.debug("\t" + "\n\t".join(incomplete))
self.forcefiles.update(incomplete)
else:
raise IncompleteFilesException(incomplete)
def incomplete_external_jobid(self, job):
"""Return the external jobid of the job if it is marked as incomplete.
        Returns None if the job is not incomplete, if no external jobid has been
        registered, or if force_incomplete is True.
"""
if self.force_incomplete:
return None
jobids = self.workflow.persistence.external_jobids(job)
if len(jobids) == 1:
return jobids[0]
elif len(jobids) > 1:
raise WorkflowError(
"Multiple different external jobids registered "
"for output files of incomplete job {} ({}). This job "
"cannot be resumed. Execute Snakemake with --rerun-incomplete "
"to fix this issue.".format(job.jobid, jobids)
)
def check_dynamic(self):
"""Check dynamic output and update downstream rules if necessary."""
if self.has_dynamic_rules:
for job in filter(
lambda job: (job.dynamic_output and not self.needrun(job)), self.jobs
):
self.update_dynamic(job)
self.postprocess()
def is_edit_notebook_job(self, job):
return self.workflow.edit_notebook and job.targetfile in self.targetfiles
@property
def dynamic_output_jobs(self):
"""Iterate over all jobs with dynamic output files."""
return (job for job in self.jobs if job.dynamic_output)
@property
def jobs(self):
""" All jobs in the DAG. """
for job in self.bfs(self.dependencies, *self.targetjobs):
yield job
@property
def needrun_jobs(self):
""" Jobs that need to be executed. """
for job in filter(
self.needrun,
self.bfs(self.dependencies, *self.targetjobs, stop=self.noneedrun_finished),
):
yield job
@property
def local_needrun_jobs(self):
"""Iterate over all jobs that need to be run and are marked as local."""
return filter(lambda job: job.is_local, self.needrun_jobs)
@property
def finished_jobs(self):
""" Iterate over all jobs that have been finished."""
for job in filter(self.finished, self.bfs(self.dependencies, *self.targetjobs)):
yield job
@property
def ready_jobs(self):
"""Jobs that are ready to execute."""
return self._ready_jobs
def needrun(self, job):
"""Return whether a given job needs to be executed."""
return job in self._needrun
def priority(self, job):
"""Return priority of given job."""
return self._priority[job]
def noneedrun_finished(self, job):
"""
Return whether a given job is finished or was not
required to run at all.
"""
return not self.needrun(job) or self.finished(job)
def reason(self, job):
""" Return the reason of the job execution. """
return self._reason[job]
def finished(self, job):
""" Return whether a job is finished. """
return job in self._finished
def dynamic(self, job):
"""
Return whether a job is dynamic (i.e. it is only a placeholder
for those that are created after the job with dynamic output has
        finished).
"""
if job.is_group():
for j in job:
if j in self._dynamic:
return True
else:
return job in self._dynamic
def requested_files(self, job):
"""Return the files a job requests."""
        return set(chain(*self.depending[job].values()))
@property
def incomplete_files(self):
"""Return list of incomplete files."""
return list(
chain(
*(
job.output
for job in filter(
self.workflow.persistence.incomplete,
filterfalse(self.needrun, self.jobs),
)
)
)
)
@property
def newversion_files(self):
"""Return list of files where the current version is newer than the
recorded version.
"""
return list(
chain(
*(
job.output
for job in filter(self.workflow.persistence.newversion, self.jobs)
)
)
)
def missing_temp(self, job):
"""
Return whether a temp file that is input of the given job is missing.
"""
for job_, files in self.depending[job].items():
if self.needrun(job_) and any(not f.exists for f in files):
return True
return False
def check_and_touch_output(
self,
job,
wait=3,
ignore_missing_output=False,
no_touch=False,
force_stay_on_remote=False,
):
""" Raise exception if output files of job are missing. """
expanded_output = [job.shadowed_path(path) for path in job.expanded_output]
if job.benchmark:
expanded_output.append(job.benchmark)
if not ignore_missing_output:
try:
wait_for_files(
expanded_output,
latency_wait=wait,
force_stay_on_remote=force_stay_on_remote,
ignore_pipe=True,
)
except IOError as e:
                raise MissingOutputException(
                    str(e) + "\nThis might be due to "
                    "filesystem latency. If that is the case, consider increasing the "
                    "wait time with --latency-wait.",
rule=job.rule,
)
# Ensure that outputs are of the correct type (those flagged with directory()
# are directories and not files and vice versa).
for f in expanded_output:
if (f.is_directory and not os.path.isdir(f)) or (
os.path.isdir(f) and not f.is_directory
):
raise ImproperOutputException(job.rule, [f])
# It is possible, due to archive expansion or cluster clock skew, that
# the files appear older than the input. But we know they must be new,
# so touch them to update timestamps. This also serves to touch outputs
# when using the --touch flag.
# Note that if the input files somehow have a future date then this will
# not currently be spotted and the job will always be re-run.
if not no_touch:
for f in expanded_output:
# This won't create normal files if missing, but will create
# the flag file for directories.
if f.exists_local:
f.touch()
def unshadow_output(self, job, only_log=False):
""" Move files from shadow directory to real output paths. """
if not job.shadow_dir or not job.expanded_output:
return
files = job.log if only_log else chain(job.expanded_output, job.log)
for real_output in files:
shadow_output = job.shadowed_path(real_output).file
# Remake absolute symlinks as relative
if os.path.islink(shadow_output):
dest = os.readlink(shadow_output)
if os.path.isabs(dest):
rel_dest = os.path.relpath(dest, job.shadow_dir)
os.remove(shadow_output)
os.symlink(rel_dest, shadow_output)
if os.path.realpath(shadow_output) == os.path.realpath(real_output):
continue
logger.debug(
"Moving shadow output {} to destination {}".format(
shadow_output, real_output
)
)
shutil.move(shadow_output, real_output)
shutil.rmtree(job.shadow_dir)
def check_periodic_wildcards(self, job):
"""Raise an exception if a wildcard of the given job appears to be periodic,
indicating a cyclic dependency."""
for wildcard, value in job.wildcards_dict.items():
periodic_substring = self.periodic_wildcard_detector.is_periodic(value)
if periodic_substring is not None:
raise PeriodicWildcardError(
"The value {} in wildcard {} is periodically repeated ({}). "
"This would lead to an infinite recursion. "
"To avoid this, e.g. restrict the wildcards in this rule to certain values.".format(
periodic_substring, wildcard, value
),
rule=job.rule,
)
def handle_protected(self, job):
""" Write-protect output files that are marked with protected(). """
for f in job.expanded_output:
if f in job.protected_output:
logger.info("Write-protecting output file {}.".format(f))
f.protect()
def handle_touch(self, job):
""" Touches those output files that are marked for touching. """
for f in job.expanded_output:
if f in job.touch_output:
f = job.shadowed_path(f)
logger.info("Touching output file {}.".format(f))
f.touch_or_create()
assert os.path.exists(f)
def temp_input(self, job):
for job_, files in self.dependencies[job].items():
for f in filter(job_.temp_output.__contains__, files):
yield f
def temp_size(self, job):
"""Return the total size of temporary input files of the job.
If none, return 0.
"""
return sum(f.size for f in self.temp_input(job))
def handle_temp(self, job):
""" Remove temp files if they are no longer needed. Update temp_mtimes. """
if self.notemp:
return
is_temp = lambda f: is_flagged(f, "temp")
# handle temp input
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
# temp input
for job_, files in self.dependencies[job].items():
tempfiles = set(f for f in job_.expanded_output if is_temp(f))
yield from filterfalse(partial(needed, job_), tempfiles & files)
# temp output
if not job.dynamic_output and (
job not in self.targetjobs or job.rule.name == self.workflow.first_rule
):
tempfiles = (
f
for f in job.expanded_output
if is_temp(f) and f not in self.targetfiles
)
yield from filterfalse(partial(needed, job), tempfiles)
for f in unneeded_files():
logger.info("Removing temporary output file {}.".format(f))
f.remove(remove_non_empty_dir=True)
def handle_log(self, job, upload_remote=True):
for f in job.log:
if not f.exists_local:
# If log file was not created during job, create an empty one.
f.touch_or_create()
if upload_remote and f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
def handle_remote(self, job, upload=True):
""" Remove local files if they are no longer needed and upload. """
if upload:
# handle output files
files = list(job.expanded_output)
if job.benchmark:
files.append(job.benchmark)
for f in files:
if f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
remote_mtime = f.mtime
# immediately force local mtime to match remote,
# since conversions from S3 headers are not 100% reliable
# without this, newness comparisons may fail down the line
f.touch(times=(remote_mtime, remote_mtime))
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
if not self.keep_remote_local:
# handle input files
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
putative = (
lambda f: f.is_remote
and not f.protected
and not f.should_keep_local
)
generated_input = set()
for job_, files in self.dependencies[job].items():
generated_input |= files
for f in filter(putative, files):
if not needed(job_, f):
yield f
for f, f_ in zip(job.output, job.rule.output):
if putative(f) and not needed(job, f) and not f in self.targetfiles:
if f in job.dynamic_output:
for f_ in job.expand_dynamic(f_):
yield f_
else:
yield f
for f in filter(putative, job.input):
# TODO what about remote inputs that are used by multiple jobs?
if f not in generated_input:
yield f
for f in unneeded_files():
if f.exists_local:
logger.info("Removing local output file: {}".format(f))
f.remove()
def jobid(self, job):
"""Return job id of given job."""
if job.is_group():
return job.jobid
else:
return self._jobid[job]
def update(
self, jobs, file=None, visited=None, skip_until_dynamic=False, progress=False
):
""" Update the DAG by adding given jobs and their dependencies. """
if visited is None:
visited = set()
producer = None
exceptions = list()
jobs = sorted(jobs, reverse=not self.ignore_ambiguity)
cycles = list()
for job in jobs:
logger.dag_debug(dict(status="candidate", job=job))
if file in job.input:
cycles.append(job)
continue
if job in visited:
cycles.append(job)
continue
try:
self.check_periodic_wildcards(job)
self.update_(
job,
visited=set(visited),
skip_until_dynamic=skip_until_dynamic,
progress=progress,
)
# TODO this might fail if a rule discarded here is needed
# elsewhere
if producer:
if job < producer or self.ignore_ambiguity:
break
elif producer is not None:
raise AmbiguousRuleException(file, job, producer)
producer = job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
exceptions.append(ex)
except RecursionError as e:
                raise WorkflowError(
                    e,
                    "If building the DAG exceeds the recursion limit, "
                    "this is likely due to a cyclic dependency. "
"E.g. you might have a sequence of rules that "
"can generate their own input. Try to make "
"the output files more specific. "
"A common pattern is to have different prefixes "
"in the output files of different rules."
+ "\nProblematic file pattern: {}".format(file)
if file
else "",
)
if producer is None:
if cycles:
job = cycles[0]
raise CyclicGraphException(job.rule, file, rule=job.rule)
if len(exceptions) > 1:
raise WorkflowError(*exceptions)
elif len(exceptions) == 1:
raise exceptions[0]
else:
logger.dag_debug(dict(status="selected", job=producer))
logger.dag_debug(
dict(
file=file,
msg="Producer found, hence exceptions are ignored.",
exception=WorkflowError(*exceptions),
)
)
n = len(self.dependencies)
if progress and n % 1000 == 0 and n and self._progress != n:
logger.info("Processed {} potential jobs.".format(n))
self._progress = n
return producer
def update_(self, job, visited=None, skip_until_dynamic=False, progress=False):
""" Update the DAG by adding the given job and its dependencies. """
if job in self.dependencies:
return
if visited is None:
visited = set()
visited.add(job)
dependencies = self.dependencies[job]
potential_dependencies = self.collect_potential_dependencies(job)
skip_until_dynamic = skip_until_dynamic and not job.dynamic_output
missing_input = set()
producer = dict()
exceptions = dict()
for file, jobs in potential_dependencies.items():
# If possible, obtain inventory information starting from
# given file and store it in the IOCache.
# This should provide faster access to existence and mtime information
# than querying file by file. If the file type does not support inventory
# information, this call is a no-op.
file.inventory()
if not jobs:
# no producing job found
if not file.exists:
# file not found, hence missing input
missing_input.add(file)
# file found, no problem
continue
try:
selected_job = self.update(
jobs,
file=file,
visited=visited,
skip_until_dynamic=skip_until_dynamic or file in job.dynamic_input,
progress=progress,
)
producer[file] = selected_job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
if not file.exists:
self.delete_job(job, recursive=False) # delete job from tree
raise ex
else:
logger.dag_debug(
dict(
file=file,
msg="No producers found, but file is present on disk.",
exception=ex,
)
)
for file, job_ in producer.items():
dependencies[job_].add(file)
self.depending[job_][job].add(file)
if self.is_batch_rule(job.rule) and self.batch.is_final:
# For the final batch, ensure that all input files from
# previous batches are present on disk.
if any(
f for f in job.input if f not in potential_dependencies and not f.exists
):
raise WorkflowError(
"Unable to execute batch {} because not all previous batches "
"have been completed before or files have been deleted.".format(
self.batch
)
)
if missing_input:
self.delete_job(job, recursive=False) # delete job from tree
raise MissingInputException(job.rule, missing_input)
if skip_until_dynamic:
self._dynamic.add(job)
def update_needrun(self):
""" Update the information whether a job needs to be executed. """
output_mintime = dict()
def update_output_mintime(job):
try:
return output_mintime[job]
except KeyError:
for job_ in chain([job], self.depending[job]):
try:
t = output_mintime[job_]
except KeyError:
t = job_.output_mintime
if t is not None:
output_mintime[job] = t
return
output_mintime[job] = None
def update_needrun(job):
reason = self.reason(job)
noinitreason = not reason
updated_subworkflow_input = self.updated_subworkflow_files.intersection(
job.input
)
if (
job not in self.omitforce
and job.rule in self.forcerules
or not self.forcefiles.isdisjoint(job.output)
):
reason.forced = True
elif updated_subworkflow_input:
reason.updated_input.update(updated_subworkflow_input)
elif job in self.targetjobs:
# TODO find a way to handle added/removed input files here?
if not job.output and not job.benchmark:
if job.input:
if job.rule.norun:
reason.updated_input_run.update(
[f for f in job.input if not f.exists]
)
else:
reason.nooutput = True
else:
reason.noio = True
else:
if job.rule in self.targetrules:
missing_output = job.missing_output()
else:
missing_output = job.missing_output(
requested=set(chain(*self.depending[job].values()))
| self.targetfiles
)
reason.missing_output.update(missing_output)
if not reason:
output_mintime_ = output_mintime.get(job)
if output_mintime_:
updated_input = [
f for f in job.input if f.exists and f.is_newer(output_mintime_)
]
reason.updated_input.update(updated_input)
if noinitreason and reason:
reason.derived = False
reason = self.reason
_needrun = self._needrun
dependencies = self.dependencies
depending = self.depending
_needrun.clear()
candidates = list(self.jobs)
# Update the output mintime of all jobs.
# We traverse them in BFS (level order) starting from target jobs.
# Then, we check output mintime of job itself and all direct descendants,
# which have already been visited in the level before.
# This way, we achieve a linear runtime.
for job in candidates:
update_output_mintime(job)
# update prior reason for all candidate jobs
for job in candidates:
update_needrun(job)
queue = list(filter(reason, candidates))
visited = set(queue)
while queue:
job = queue.pop(0)
_needrun.add(job)
for job_, files in dependencies[job].items():
missing_output = job_.missing_output(requested=files)
reason(job_).missing_output.update(missing_output)
if missing_output and not job_ in visited:
visited.add(job_)
queue.append(job_)
for job_, files in depending[job].items():
if job_ in candidates:
reason(job_).updated_input_run.update(files)
if not job_ in visited:
visited.add(job_)
queue.append(job_)
# update len including finished jobs (because they have already increased the job counter)
self._len = len(self._finished | self._needrun)
def in_until(self, job):
"""Return whether given job has been specified via --until."""
return job.rule.name in self.untilrules or not self.untilfiles.isdisjoint(
job.output
)
def in_omitfrom(self, job):
"""Return whether given job has been specified via --omit-from."""
return job.rule.name in self.omitrules or not self.omitfiles.isdisjoint(
job.output
)
    def until_jobs(self):
        """Return a generator of jobs specified via --until."""
return (job for job in self.jobs if self.in_until(job))
    def omitfrom_jobs(self):
        """Return a generator of jobs specified via --omit-from."""
return (job for job in self.jobs if self.in_omitfrom(job))
def downstream_of_omitfrom(self):
"""Returns the downstream of --omit-from rules or files and themselves."""
return self.bfs(self.depending, *self.omitfrom_jobs())
def delete_omitfrom_jobs(self):
"""Removes jobs downstream of jobs specified by --omit-from."""
if not self.omitrules and not self.omitfiles:
return
downstream_jobs = list(
self.downstream_of_omitfrom()
) # need to cast as list before deleting jobs
for job in downstream_jobs:
self.delete_job(job, recursive=False, add_dependencies=True)
    def set_until_jobs(self):
        """Set the target jobs to the jobs specified by --until."""
if not self.untilrules and not self.untilfiles:
return
self.targetjobs = set(self.until_jobs())
def update_priority(self):
""" Update job priorities. """
prioritized = (
lambda job: job.rule in self.priorityrules
or not self.priorityfiles.isdisjoint(job.output)
)
for job in self.needrun_jobs:
self._priority[job] = job.rule.priority
for job in self.bfs(
self.dependencies,
*filter(prioritized, self.needrun_jobs),
stop=self.noneedrun_finished,
):
self._priority[job] = Job.HIGHEST_PRIORITY
def update_groups(self):
groups = dict()
for job in self.needrun_jobs:
if job.group is None:
continue
stop = lambda j: j.group != job.group
# BFS into depending needrun jobs if in same group
# Note: never go up here (into depending), because it may contain
# jobs that have been sorted out due to e.g. ruleorder.
group = self.group_job_factory.new(
job.group,
(
job
for job in self.bfs(self.dependencies, job, stop=stop)
if self.needrun(job)
),
)
# merge with previously determined groups if present
for j in group:
if j in groups:
other = groups[j]
other.merge(group)
group = other
# update assignment
for j in group:
if j not in groups:
groups[j] = group
self._group = groups
self._update_group_components()
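    # Illustrative note (not in the original source): if jobs A <- B <- C (B
    # depends on A, C on B) all declare group "g1", the BFS from C collects
    # {C, B, A} into one candidate group; when B or A are later reached from
    # another seed, merge() folds the groups together so all three jobs end up
    # sharing a single GroupJob.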
def _update_group_components(self):
# span connected components if requested
for groupid, conn_components in groupby(
set(self._group.values()), key=lambda group: group.groupid
):
n_components = self.workflow.group_components.get(groupid, 1)
if n_components > 1:
for chunk in group_into_chunks(n_components, conn_components):
if len(chunk) > 1:
primary = chunk[0]
for secondary in chunk[1:]:
primary.merge(secondary)
for j in primary:
self._group[j] = primary
def update_ready(self, jobs=None):
"""Update information whether a job is ready to execute.
Given jobs must be needrun jobs!
"""
if jobs is None:
jobs = self.needrun_jobs
candidate_groups = set()
for job in jobs:
if not self.finished(job) and self._ready(job):
if job.group is None:
self._ready_jobs.add(job)
else:
group = self._group[job]
group.finalize()
candidate_groups.add(group)
self._ready_jobs.update(
group
for group in candidate_groups
if all(self._ready(job) for job in group)
)
def get_jobs_or_groups(self):
visited_groups = set()
for job in self.jobs:
if job.group is None:
yield job
else:
group = self._group[job]
if group in visited_groups:
continue
visited_groups.add(group)
yield group
def close_remote_objects(self):
"""Close all remote objects."""
for job in self.jobs:
if not self.needrun(job):
job.close_remote()
def postprocess(self):
"""Postprocess the DAG. This has to be invoked after any change to the
DAG topology."""
self.update_jobids()
self.update_needrun()
self.update_priority()
self.handle_pipes()
self.update_groups()
self.update_ready()
self.close_remote_objects()
self.update_checkpoint_outputs()
def handle_pipes(self):
"""Use pipes to determine job groups. Check if every pipe has exactly
one consumer"""
for job in self.needrun_jobs:
candidate_groups = set()
if job.group is not None:
candidate_groups.add(job.group)
all_depending = set()
has_pipe = False
for f in job.output:
if is_flagged(f, "pipe"):
if job.is_run:
raise WorkflowError(
"Rule defines pipe output but "
"uses a 'run' directive. This is "
"not possible for technical "
"reasons. Consider using 'shell' or "
"'script'.",
rule=job.rule,
)
has_pipe = True
depending = [
j for j, files in self.depending[job].items() if f in files
]
if len(depending) > 1:
raise WorkflowError(
"Output file {} is marked as pipe "
"but more than one job depends on "
"it. Make sure that any pipe "
"output is only consumed by one "
"job".format(f),
rule=job.rule,
)
elif len(depending) == 0:
raise WorkflowError(
"Output file {} is marked as pipe "
"but it has no consumer. This is "
"invalid because it can lead to "
"a dead lock.".format(f),
rule=job.rule,
)
depending = depending[0]
if depending.is_run:
raise WorkflowError(
"Rule consumes pipe input but "
"uses a 'run' directive. This is "
"not possible for technical "
"reasons. Consider using 'shell' or "
"'script'.",
rule=job.rule,
)
all_depending.add(depending)
if depending.group is not None:
candidate_groups.add(depending.group)
if not has_pipe:
continue
if len(candidate_groups) > 1:
raise WorkflowError(
"An output file is marked as "
"pipe, but consuming jobs "
"are part of conflicting "
"groups.",
rule=job.rule,
)
elif candidate_groups:
# extend the candidate group to all involved jobs
group = candidate_groups.pop()
else:
# generate a random unique group name
group = str(uuid.uuid4())
job.group = group
for j in all_depending:
j.group = group
def _ready(self, job):
"""Return whether the given job is ready to execute."""
group = self._group.get(job, None)
if group is None:
is_external_needrun_dep = self.needrun
else:
def is_external_needrun_dep(j):
g = self._group.get(j, None)
return self.needrun(j) and (g is None or g != group)
return self._finished.issuperset(
filter(is_external_needrun_dep, self.dependencies[job])
)
def update_checkpoint_dependencies(self, jobs=None):
"""Update dependencies of checkpoints."""
updated = False
self.update_checkpoint_outputs()
if jobs is None:
jobs = [job for job in self.jobs if not self.needrun(job)]
for job in jobs:
if job.is_checkpoint:
depending = list(self.depending[job])
# re-evaluate depending jobs, replace and update DAG
for j in depending:
logger.info("Updating job {} ({}).".format(self.jobid(j), j))
newjob = j.updated()
self.replace_job(j, newjob, recursive=False)
updated = True
if updated:
# This has to be done for each checkpoint,
# otherwise, jobs may be missing in the end.
self.postprocess()
return updated
def finish(self, job, update_dynamic=True):
"""Finish a given job (e.g. remove from ready jobs, mark depending jobs
as ready)."""
try:
self._ready_jobs.remove(job)
except KeyError:
pass
if job.is_group():
jobs = job
else:
jobs = [job]
self._finished.update(jobs)
updated_dag = False
if update_dynamic:
updated_dag = self.update_checkpoint_dependencies(jobs)
# mark depending jobs as ready
# skip jobs that are marked as until jobs
self.update_ready(
j
for job in jobs
for j in self.depending[job]
if not self.in_until(job) and self.needrun(j)
)
for job in jobs:
if update_dynamic and job.dynamic_output:
logger.info("Dynamically updating jobs")
newjob = self.update_dynamic(job)
if newjob:
# simulate that this job ran and was finished before
self.omitforce.add(newjob)
self._needrun.add(newjob)
self._finished.add(newjob)
updated_dag = True
self.postprocess()
self.handle_protected(newjob)
self.handle_touch(newjob)
if updated_dag:
# We might have new jobs, so we need to ensure that all conda envs
# and singularity images are set up.
if self.workflow.use_singularity:
self.pull_container_imgs()
if self.workflow.use_conda:
self.create_conda_envs()
def new_job(self, rule, targetfile=None, format_wildcards=None):
"""Create new job for given rule and (optional) targetfile.
This will reuse existing jobs with the same wildcards."""
key = (rule, targetfile)
if key in self.job_cache:
assert targetfile is not None
return self.job_cache[key]
wildcards_dict = rule.get_wildcards(targetfile)
job = self.job_factory.new(
rule,
self,
wildcards_dict=wildcards_dict,
format_wildcards=format_wildcards,
targetfile=targetfile,
)
self.cache_job(job)
return job
def cache_job(self, job):
for f in job.products:
self.job_cache[(job.rule, f)] = job
def update_dynamic(self, job):
"""Update the DAG by evaluating the output of the given job that
contains dynamic output files."""
dynamic_wildcards = job.dynamic_wildcards
if not dynamic_wildcards:
# this happens e.g. in dryrun if output is not yet present
return
depending = list(
filter(lambda job_: not self.finished(job_), self.bfs(self.depending, job))
)
newrule, non_dynamic_wildcards = job.rule.dynamic_branch(
dynamic_wildcards, input=False
)
self.specialize_rule(job.rule, newrule)
# no targetfile needed for job
newjob = self.new_job(newrule, format_wildcards=non_dynamic_wildcards)
self.replace_job(job, newjob)
for job_ in depending:
needs_update = any(
f.get_wildcard_names() & dynamic_wildcards.keys()
for f in job_.rule.dynamic_input
)
if needs_update:
newrule_ = job_.rule.dynamic_branch(dynamic_wildcards)
if newrule_ is not None:
self.specialize_rule(job_.rule, newrule_)
if not self.dynamic(job_):
logger.debug("Updating job {}.".format(job_))
newjob_ = self.new_job(
newrule_, targetfile=job_.output[0] if job_.output else None
)
unexpected_output = self.reason(
job_
).missing_output.intersection(newjob.existing_output)
if unexpected_output:
logger.warning(
"Warning: the following output files of rule {} were not "
"present when the DAG was created:\n{}".format(
newjob_.rule, unexpected_output
)
)
self.replace_job(job_, newjob_)
return newjob
def delete_job(self, job, recursive=True, add_dependencies=False):
"""Delete given job from DAG."""
if job in self.targetjobs:
self.targetjobs.remove(job)
if add_dependencies:
for _job in self.dependencies[job]:
self.targetjobs.add(_job)
for job_ in self.depending[job]:
del self.dependencies[job_][job]
del self.depending[job]
for job_ in self.dependencies[job]:
depending = self.depending[job_]
del depending[job]
if not depending and recursive:
self.delete_job(job_)
del self.dependencies[job]
if job in self._needrun:
self._len -= 1
self._needrun.remove(job)
del self._reason[job]
if job in self._finished:
self._finished.remove(job)
if job in self._dynamic:
self._dynamic.remove(job)
if job in self._ready_jobs:
self._ready_jobs.remove(job)
# remove from cache
for f in job.output:
try:
del self.job_cache[(job.rule, f)]
except KeyError:
pass
def replace_job(self, job, newjob, recursive=True):
"""Replace given job with new job."""
add_to_targetjobs = job in self.targetjobs
depending = list(self.depending[job].items())
if self.finished(job):
self._finished.add(newjob)
self.delete_job(job, recursive=recursive)
if add_to_targetjobs:
self.targetjobs.add(newjob)
self.cache_job(newjob)
self.update([newjob])
logger.debug("Replace {} with dynamic branch {}".format(job, newjob))
for job_, files in depending:
# if not job_.dynamic_input:
logger.debug("updating depending job {}".format(job_))
self.dependencies[job_][newjob].update(files)
self.depending[newjob][job_].update(files)
def specialize_rule(self, rule, newrule):
"""Specialize the given rule by inserting newrule into the DAG."""
assert newrule is not None
self.rules.add(newrule)
self.update_output_index()
def is_batch_rule(self, rule):
"""Return True if the underlying rule is to be used for batching the DAG."""
return self.batch is not None and rule.name == self.batch.rulename
# MASKED: collect_potential_dependencies function (lines 1423-1458)
def bfs(self, direction, *jobs, stop=lambda job: False):
"""Perform a breadth-first traversal of the DAG."""
queue = list(jobs)
visited = set(queue)
while queue:
job = queue.pop(0)
if stop(job):
# stop criterion reached for this node
continue
yield job
for job_, _ in direction[job].items():
if not job_ in visited:
queue.append(job_)
visited.add(job_)
def level_bfs(self, direction, *jobs, stop=lambda job: False):
"""Perform a breadth-first traversal of the DAG, but also yield the
level together with each job."""
queue = [(job, 0) for job in jobs]
visited = set(jobs)
while queue:
job, level = queue.pop(0)
if stop(job):
# stop criterion reached for this node
continue
yield level, job
level += 1
for job_, _ in direction[job].items():
if not job_ in visited:
queue.append((job_, level))
visited.add(job_)
def dfs(self, direction, *jobs, stop=lambda job: False, post=True):
"""Perform depth-first traversal of the DAG."""
visited = set()
def _dfs(job):
"""Inner function for DFS traversal."""
if stop(job):
return
if not post:
yield job
for job_ in direction[job]:
if not job_ in visited:
visited.add(job_)
for j in _dfs(job_):
yield j
if post:
yield job
for job in jobs:
            for job_ in _dfs(job):
yield job_
def new_wildcards(self, job):
"""Return wildcards that are newly introduced in this job,
compared to its ancestors."""
new_wildcards = set(job.wildcards.items())
for job_ in self.dependencies[job]:
if not new_wildcards:
return set()
for wildcard in job_.wildcards.items():
new_wildcards.discard(wildcard)
return new_wildcards
def rule2job(self, targetrule):
"""Generate a new job from a given rule."""
if targetrule.has_wildcards():
raise WorkflowError(
"Target rules may not contain wildcards. Please specify concrete files or a rule without wildcards."
)
return self.new_job(targetrule)
def file2jobs(self, targetfile):
rules = self.output_index.match(targetfile)
jobs = []
exceptions = list()
for rule in rules:
if rule.is_producer(targetfile):
try:
jobs.append(self.new_job(rule, targetfile=targetfile))
except InputFunctionException as e:
exceptions.append(e)
if not jobs:
if exceptions:
raise exceptions[0]
raise MissingRuleException(targetfile)
return jobs
def rule_dot2(self):
dag = defaultdict(list)
visited = set()
preselect = set()
def preselect_parents(job):
for parent in self.depending[job]:
if parent in preselect:
continue
preselect.add(parent)
preselect_parents(parent)
def build_ruledag(job, key=lambda job: job.rule.name):
if job in visited:
return
visited.add(job)
deps = sorted(self.dependencies[job], key=key)
deps = [
(
group[0]
if preselect.isdisjoint(group)
else preselect.intersection(group).pop()
)
for group in (list(g) for _, g in groupby(deps, key))
]
dag[job].extend(deps)
preselect_parents(job)
for dep in deps:
build_ruledag(dep)
for job in self.targetjobs:
build_ruledag(job)
return self._dot(dag.keys(), print_wildcards=False, print_types=False, dag=dag)
def rule_dot(self):
graph = defaultdict(set)
for job in self.jobs:
graph[job.rule].update(dep.rule for dep in self.dependencies[job])
return self._dot(graph)
def dot(self):
def node2style(job):
if not self.needrun(job):
return "rounded,dashed"
if self.dynamic(job) or job.dynamic_input:
return "rounded,dotted"
return "rounded"
def format_wildcard(wildcard):
name, value = wildcard
if DYNAMIC_FILL in value:
value = "..."
return "{}: {}".format(name, value)
node2rule = lambda job: job.rule
node2label = lambda job: "\\n".join(
chain(
[job.rule.name], sorted(map(format_wildcard, self.new_wildcards(job)))
)
)
dag = {job: self.dependencies[job] for job in self.jobs}
return self._dot(
dag, node2rule=node2rule, node2style=node2style, node2label=node2label
)
def _dot(
self,
graph,
node2rule=lambda node: node,
node2style=lambda node: "rounded",
node2label=lambda node: node,
):
# color rules
huefactor = 2 / (3 * len(self.rules))
rulecolor = {
rule: "{:.2f} 0.6 0.85".format(i * huefactor)
for i, rule in enumerate(self.rules)
}
# markup
node_markup = '\t{}[label = "{}", color = "{}", style="{}"];'.format
edge_markup = "\t{} -> {}".format
# node ids
ids = {node: i for i, node in enumerate(graph)}
# calculate nodes
nodes = [
node_markup(
ids[node],
node2label(node),
rulecolor[node2rule(node)],
node2style(node),
)
for node in graph
]
# calculate edges
edges = [
edge_markup(ids[dep], ids[node])
for node, deps in graph.items()
for dep in deps
]
return textwrap.dedent(
"""\
digraph snakemake_dag {{
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, \
fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
{items}
}}\
"""
).format(items="\n".join(nodes + edges))
def filegraph_dot(
self,
node2rule=lambda node: node,
node2style=lambda node: "rounded",
node2label=lambda node: node,
):
# NOTE: This is code from the rule_dot method.
# This method could be split like there as well, however,
# it cannot easily reuse the _dot method due to the different node type
graph = defaultdict(set)
for job in self.jobs:
graph[job.rule].update(dep.rule for dep in self.dependencies[job])
# node ids
ids = {node: i for i, node in enumerate(graph)}
# Compute colors for rules
def hsv_to_htmlhexrgb(h, s, v):
"""Convert hsv colors to hex-encoded rgb colors usable by html."""
import colorsys
hex_r, hex_g, hex_b = (round(255 * x) for x in colorsys.hsv_to_rgb(h, s, v))
return "#{hex_r:0>2X}{hex_g:0>2X}{hex_b:0>2X}".format(
hex_r=hex_r, hex_g=hex_g, hex_b=hex_b
)
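        # For instance, hsv_to_htmlhexrgb(0.0, 0.6, 0.85) gives "#D95757":
        # hue 0 keeps red at v (0.85 -> 217 -> D9), while s=0.6 pulls green and
        # blue down to v*(1-s) (0.34 -> 87 -> 57).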
huefactor = 2 / (3 * len(self.rules))
rulecolor = {
rule: hsv_to_htmlhexrgb(i * huefactor, 0.6, 0.85)
for i, rule in enumerate(self.rules)
}
def resolve_input_functions(input_files):
"""Iterate over all input files and replace input functions
with a fixed string.
"""
files = []
for f in input_files:
if callable(f):
files.append("<input function>")
# NOTE: This is a workaround. It would be more informative
# to show the code of the input function here (if it is
# short enough). This cannot be easily done with the inspect
# module, since the line numbers in the Snakefile do not
# behave as expected. One (complicated) solution for this
# would be to find the Snakefile and directly extract the
# code of the function.
else:
files.append(repr(f).strip("'"))
return files
def html_node(node_id, node, color):
"""Assemble a html style node for graphviz"""
input_files = resolve_input_functions(node._input)
output_files = [repr(f).strip("'") for f in node._output]
input_header = (
'<b><font point-size="14">↪ input</font></b>'
if input_files
else ""
)
output_header = (
'<b><font point-size="14">output →</font></b>'
if output_files
else ""
)
html_node = [
'{node_id} [ shape=none, margin=0, label=<<table border="2" color="{color}" cellspacing="3" cellborder="0">'.format(
node_id=node_id, color=color
),
"<tr><td>",
'<b><font point-size="18">{node.name}</font></b>'.format(node=node),
"</td></tr>",
"<hr/>",
'<tr><td align="left"> {input_header} </td></tr>'.format(
input_header=input_header
),
]
for filename in sorted(input_files):
# Escape html relevant chars like '<' and '>' in filenames
# These can be added by input functions etc. and cannot be
# displayed in graphviz HTML nodes.
in_file = html.escape(filename)
html_node.extend(
[
"<tr>",
'<td align="left"><font face="monospace">{in_file}</font></td>'.format(
in_file=in_file
),
"</tr>",
]
)
html_node.append("<hr/>")
html_node.append(
'<tr><td align="right"> {output_header} </td> </tr>'.format(
output_header=output_header
)
)
for filename in sorted(output_files):
out_file = html.escape(filename)
html_node.extend(
[
"<tr>",
'<td align="left"><font face="monospace">{out_file}</font></td>'
"</tr>".format(out_file=out_file),
]
)
html_node.append("</table>>]")
return "\n".join(html_node)
nodes = [
html_node(ids[node], node, rulecolor[node2rule(node)]) for node in graph
]
# calculate edges
edge_markup = "\t{} -> {}".format
edges = [
            edge_markup(ids[dep], ids[node])
for node, deps in graph.items()
for dep in deps
]
return textwrap.dedent(
"""\
digraph snakemake_dag {{
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, \
fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
{items}
}}\
"""
).format(items="\n".join(nodes + edges))
def summary(self, detailed=False):
if detailed:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tinput-file(s)\tshellcmd\tstatus\tplan"
else:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tstatus\tplan"
for job in self.jobs:
output = job.rule.output if self.dynamic(job) else job.expanded_output
for f in output:
rule = self.workflow.persistence.rule(f)
rule = "-" if rule is None else rule
version = self.workflow.persistence.version(f)
version = "-" if version is None else str(version)
date = time.ctime(f.mtime) if f.exists else "-"
pending = "update pending" if self.reason(job) else "no update"
log = self.workflow.persistence.log(f)
log = "-" if log is None else ",".join(log)
input = self.workflow.persistence.input(f)
input = "-" if input is None else ",".join(input)
shellcmd = self.workflow.persistence.shellcmd(f)
shellcmd = "-" if shellcmd is None else shellcmd
# remove new line characters, leading and trailing whitespace
shellcmd = shellcmd.strip().replace("\n", "; ")
status = "ok"
if not f.exists:
status = "missing"
elif self.reason(job).updated_input:
status = "updated input files"
elif self.workflow.persistence.version_changed(job, file=f):
status = "version changed to {}".format(job.rule.version)
elif self.workflow.persistence.code_changed(job, file=f):
status = "rule implementation changed"
elif self.workflow.persistence.input_changed(job, file=f):
status = "set of input files changed"
elif self.workflow.persistence.params_changed(job, file=f):
status = "params changed"
if detailed:
yield "\t".join(
(f, date, rule, version, log, input, shellcmd, status, pending)
)
else:
yield "\t".join((f, date, rule, version, log, status, pending))
def archive(self, path):
"""Archives workflow such that it can be re-run on a different system.
        Archiving includes git-versioned files (e.g. Snakefiles, config files, ...),
ancestral input files and conda environments.
"""
if path.endswith(".tar"):
mode = "x"
elif path.endswith("tar.bz2"):
mode = "x:bz2"
elif path.endswith("tar.xz"):
mode = "x:xz"
elif path.endswith("tar.gz"):
mode = "x:gz"
else:
raise WorkflowError(
"Unsupported archive format "
"(supported: .tar, .tar.gz, .tar.bz2, .tar.xz)"
)
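        # Illustrative mapping (hypothetical path): "results.tar.gz" selects
        # mode "x:gz"; all modes use tarfile's exclusive-creation "x" flag, so
        # tarfile itself would also refuse to overwrite an existing archive.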
if os.path.exists(path):
raise WorkflowError("Archive already exists:\n" + path)
self.create_conda_envs(forceall=True)
try:
workdir = Path(os.path.abspath(os.getcwd()))
with tarfile.open(path, mode=mode, dereference=True) as archive:
archived = set()
def add(path):
if workdir not in Path(os.path.abspath(path)).parents:
logger.warning(
"Path {} cannot be archived: "
"not within working directory.".format(path)
)
else:
f = os.path.relpath(path)
if f not in archived:
archive.add(f)
archived.add(f)
logger.info("archived " + f)
logger.info(
"Archiving snakefiles, scripts and files under "
"version control..."
)
for f in self.workflow.get_sources():
add(f)
logger.info("Archiving external input files...")
for job in self.jobs:
# input files
for f in job.input:
if not any(
f in files for files in self.dependencies[job].values()
):
# this is an input file that is not created by any job
add(f)
logger.info("Archiving conda environments...")
envs = set()
for job in self.jobs:
if job.conda_env_file:
env_archive = job.archive_conda_env()
envs.add(env_archive)
for env in envs:
add(env)
except (Exception, BaseException) as e:
os.remove(path)
raise e
def clean(self, only_temp=False, dryrun=False):
"""Removes files generated by the workflow."""
for job in self.jobs:
for f in job.output:
if not only_temp or is_flagged(f, "temp"):
# The reason for the second check is that dangling
# symlinks fail f.exists.
if f.exists or os.path.islink(f):
if f.protected:
logger.error("Skipping write-protected file {}.".format(f))
else:
msg = "Deleting {}" if not dryrun else "Would delete {}"
logger.info(msg.format(f))
if not dryrun:
# Remove non-empty dirs if flagged as temp()
f.remove(remove_non_empty_dir=only_temp)
def list_untracked(self):
"""List files in the workdir that are not in the dag."""
used_files = set()
files_in_cwd = set()
for job in self.jobs:
used_files.update(
os.path.relpath(file)
for file in chain(job.local_input, job.local_output, job.log)
)
for root, dirs, files in os.walk(os.getcwd()):
# Ignore hidden files and don't traverse into hidden dirs
files_in_cwd.update(
[
os.path.relpath(os.path.join(root, f))
for f in files
if not f[0] == "."
]
)
dirs[:] = [d for d in dirs if not d[0] == "."]
for f in sorted(list(files_in_cwd - used_files)):
logger.info(f)
def d3dag(self, max_jobs=10000):
def node(job):
jobid = self.jobid(job)
return {
"id": jobid,
"value": {
"jobid": jobid,
"label": job.rule.name,
"rule": job.rule.name,
},
}
def edge(a, b):
return {"u": self.jobid(a), "v": self.jobid(b)}
jobs = list(self.jobs)
if len(jobs) > max_jobs:
logger.info(
"Job-DAG is too large for visualization (>{} jobs).".format(max_jobs)
)
else:
logger.d3dag(
nodes=[node(job) for job in jobs],
edges=[
edge(dep, job)
for job in jobs
for dep in self.dependencies[job]
if self.needrun(dep)
],
)
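    # Sketch of the structures handed to the logger (hypothetical ids and rule
    # name): nodes look like
    # {"id": 0, "value": {"jobid": 0, "label": "align", "rule": "align"}} and
    # edges like {"u": 0, "v": 1}, where u is the dependency and v the job that
    # consumes its output; only needrun dependencies contribute edges.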
def stats(self):
rules = Counter()
rules.update(job.rule for job in self.needrun_jobs)
rules.update(job.rule for job in self.finished_jobs)
yield "Job counts:"
yield "\tcount\tjobs"
for rule, count in sorted(rules.most_common(), key=lambda item: item[0].name):
yield "\t{}\t{}".format(count, rule)
yield "\t{}".format(len(self))
def __str__(self):
return self.dot()
def __len__(self):
return self._len
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2015-2019, Johannes Köster"
__email__ = "[email protected]"
__license__ = "MIT"
import html
import os
import shutil
import textwrap
import time
import tarfile
from collections import defaultdict, Counter
from itertools import chain, filterfalse, groupby
from functools import partial
from pathlib import Path
import uuid
import math
from snakemake.io import PeriodicityDetector, wait_for_files, is_flagged
from snakemake.jobs import Reason, JobFactory, GroupJobFactory, Job
from snakemake.exceptions import MissingInputException
from snakemake.exceptions import MissingRuleException, AmbiguousRuleException
from snakemake.exceptions import CyclicGraphException, MissingOutputException
from snakemake.exceptions import IncompleteFilesException, ImproperOutputException
from snakemake.exceptions import PeriodicWildcardError
from snakemake.exceptions import RemoteFileException, WorkflowError, ChildIOException
from snakemake.exceptions import InputFunctionException
from snakemake.logging import logger
from snakemake.common import DYNAMIC_FILL, group_into_chunks
from snakemake.deployment import conda, singularity
from snakemake.output_index import OutputIndex
from snakemake import workflow
class Batch:
"""Definition of a batch for calculating only a partial DAG."""
def __init__(self, rulename: str, idx: int, batches: int):
assert idx <= batches
assert idx > 0
self.rulename = rulename
self.idx = idx
self.batches = batches
def get_batch(self, items: list):
"""Return the defined batch of the given items.
Items are usually input files."""
# make sure that we always consider items in the same order
if len(items) < self.batches:
raise WorkflowError(
"Batching rule {} has less input files than batches. "
"Please choose a smaller number of batches.".format(self.rulename)
)
items = sorted(items)
batch_len = math.floor(len(items) / self.batches)
        # self.idx is one-based, hence we have to subtract 1
idx = self.idx - 1
i = idx * batch_len
if self.is_final:
# extend the last batch to cover rest of list
return items[i:]
else:
return items[i : i + batch_len]
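        # Worked example (hypothetical numbers): with 10 sorted items and
        # batches=3, batch_len = 3, so idx=1 -> items[0:3], idx=2 -> items[3:6],
        # and the final idx=3 -> items[6:], which absorbs the remaining 4 items.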
@property
def is_final(self):
return self.idx == self.batches
def __str__(self):
return "{}/{} (rule {})".format(self.idx, self.batches, self.rulename)
class DAG:
"""Directed acyclic graph of jobs."""
def __init__(
self,
workflow,
rules=None,
dryrun=False,
targetfiles=None,
targetrules=None,
forceall=False,
forcerules=None,
forcefiles=None,
priorityfiles=None,
priorityrules=None,
untilfiles=None,
untilrules=None,
omitfiles=None,
omitrules=None,
ignore_ambiguity=False,
force_incomplete=False,
ignore_incomplete=False,
notemp=False,
keep_remote_local=False,
batch=None,
):
self.dryrun = dryrun
self.dependencies = defaultdict(partial(defaultdict, set))
self.depending = defaultdict(partial(defaultdict, set))
self._needrun = set()
self._priority = dict()
self._reason = defaultdict(Reason)
self._finished = set()
self._dynamic = set()
self._len = 0
self.workflow = workflow
self.rules = set(rules)
self.ignore_ambiguity = ignore_ambiguity
self.targetfiles = targetfiles
self.targetrules = targetrules
self.priorityfiles = priorityfiles
self.priorityrules = priorityrules
self.targetjobs = set()
self.prioritytargetjobs = set()
self._ready_jobs = set()
self.notemp = notemp
self.keep_remote_local = keep_remote_local
self._jobid = dict()
self.job_cache = dict()
self.conda_envs = dict()
self.container_imgs = dict()
self._progress = 0
self._group = dict()
self.job_factory = JobFactory()
self.group_job_factory = GroupJobFactory()
self.forcerules = set()
self.forcefiles = set()
self.untilrules = set()
self.untilfiles = set()
self.omitrules = set()
self.omitfiles = set()
self.updated_subworkflow_files = set()
if forceall:
self.forcerules.update(self.rules)
elif forcerules:
self.forcerules.update(forcerules)
if forcefiles:
self.forcefiles.update(forcefiles)
if untilrules:
self.untilrules.update(set(rule.name for rule in untilrules))
if untilfiles:
self.untilfiles.update(untilfiles)
if omitrules:
self.omitrules.update(set(rule.name for rule in omitrules))
if omitfiles:
self.omitfiles.update(omitfiles)
self.has_dynamic_rules = any(rule.dynamic_output for rule in self.rules)
self.omitforce = set()
self.batch = batch
if batch is not None and not batch.is_final:
# Since not all input files of a batching rule are considered, we cannot run
# beyond that rule.
# For the final batch, we do not need to omit anything.
self.omitrules.add(batch.rulename)
self.force_incomplete = force_incomplete
self.ignore_incomplete = ignore_incomplete
self.periodic_wildcard_detector = PeriodicityDetector()
self.update_output_index()
def init(self, progress=False):
""" Initialise the DAG. """
for job in map(self.rule2job, self.targetrules):
job = self.update([job], progress=progress)
self.targetjobs.add(job)
for file in self.targetfiles:
job = self.update(self.file2jobs(file), file=file, progress=progress)
self.targetjobs.add(job)
self.cleanup()
self.update_needrun()
self.set_until_jobs()
self.delete_omitfrom_jobs()
self.update_jobids()
self.check_directory_outputs()
# check if remaining jobs are valid
for i, job in enumerate(self.jobs):
job.is_valid()
def check_directory_outputs(self):
"""Check that no output file is contained in a directory output of the same or another rule."""
outputs = sorted(
{
(path(f), job)
for job in self.jobs
for f in job.output
for path in (os.path.abspath, os.path.realpath)
}
)
for i in range(len(outputs) - 1):
(a, job_a), (b, job_b) = outputs[i : i + 2]
try:
common = os.path.commonpath([a, b])
except ValueError:
# commonpath raises error if windows drives are different.
continue
if a != b and common == os.path.commonpath([a]) and job_a != job_b:
raise ChildIOException(parent=outputs[i], child=outputs[i + 1])
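    # Example of what the check above rejects (hypothetical rules/paths): if one
    # job declares directory("results/agg") and another declares
    # "results/agg/part.txt", their common path equals the directory output, so
    # a ChildIOException is raised for the parent/child pair.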
@property
def checkpoint_jobs(self):
for job in self.needrun_jobs:
if job.is_checkpoint:
yield job
def update_checkpoint_outputs(self):
workflow.checkpoints.future_output = set(
f for job in self.checkpoint_jobs for f in job.output
)
def update_jobids(self):
for job in self.jobs:
if job not in self._jobid:
self._jobid[job] = len(self._jobid)
def cleanup_workdir(self):
for io_dir in set(
os.path.dirname(io_file)
for job in self.jobs
for io_file in chain(job.output, job.input)
if not os.path.exists(io_file)
):
if os.path.exists(io_dir) and not len(os.listdir(io_dir)):
os.removedirs(io_dir)
def cleanup(self):
self.job_cache.clear()
final_jobs = set(self.jobs)
todelete = [job for job in self.dependencies if job not in final_jobs]
for job in todelete:
del self.dependencies[job]
try:
del self.depending[job]
except KeyError:
pass
def create_conda_envs(
self, dryrun=False, forceall=False, init_only=False, quiet=False
):
# First deduplicate based on job.conda_env_file
jobs = self.jobs if forceall else self.needrun_jobs
env_set = {
(job.conda_env_file, job.container_img_url)
for job in jobs
if job.conda_env_file
}
# Then based on md5sum values
self.conda_envs = dict()
for (env_file, simg_url) in env_set:
simg = None
if simg_url and self.workflow.use_singularity:
assert (
simg_url in self.container_imgs
), "bug: must first pull singularity images"
simg = self.container_imgs[simg_url]
env = conda.Env(
env_file,
self,
container_img=simg,
cleanup=self.workflow.conda_cleanup_pkgs,
)
self.conda_envs[(env_file, simg_url)] = env
if not init_only:
for env in self.conda_envs.values():
if not dryrun or not quiet:
env.create(dryrun)
def pull_container_imgs(self, dryrun=False, forceall=False, quiet=False):
# First deduplicate based on job.conda_env_file
jobs = self.jobs if forceall else self.needrun_jobs
img_set = {job.container_img_url for job in jobs if job.container_img_url}
for img_url in img_set:
img = singularity.Image(img_url, self)
if not dryrun or not quiet:
img.pull(dryrun)
self.container_imgs[img_url] = img
def update_output_index(self):
"""Update the OutputIndex."""
self.output_index = OutputIndex(self.rules)
def check_incomplete(self):
"""Check if any output files are incomplete. This is done by looking up
markers in the persistence module."""
if not self.ignore_incomplete:
incomplete = self.incomplete_files
if incomplete:
if self.force_incomplete:
logger.debug("Forcing incomplete files:")
logger.debug("\t" + "\n\t".join(incomplete))
self.forcefiles.update(incomplete)
else:
raise IncompleteFilesException(incomplete)
def incomplete_external_jobid(self, job):
"""Return the external jobid of the job if it is marked as incomplete.
Returns None, if job is not incomplete, or if no external jobid has been
registered or if force_incomplete is True.
"""
if self.force_incomplete:
return None
jobids = self.workflow.persistence.external_jobids(job)
if len(jobids) == 1:
return jobids[0]
elif len(jobids) > 1:
raise WorkflowError(
"Multiple different external jobids registered "
"for output files of incomplete job {} ({}). This job "
"cannot be resumed. Execute Snakemake with --rerun-incomplete "
"to fix this issue.".format(job.jobid, jobids)
)
def check_dynamic(self):
"""Check dynamic output and update downstream rules if necessary."""
if self.has_dynamic_rules:
for job in filter(
lambda job: (job.dynamic_output and not self.needrun(job)), self.jobs
):
self.update_dynamic(job)
self.postprocess()
def is_edit_notebook_job(self, job):
return self.workflow.edit_notebook and job.targetfile in self.targetfiles
@property
def dynamic_output_jobs(self):
"""Iterate over all jobs with dynamic output files."""
return (job for job in self.jobs if job.dynamic_output)
@property
def jobs(self):
""" All jobs in the DAG. """
for job in self.bfs(self.dependencies, *self.targetjobs):
yield job
@property
def needrun_jobs(self):
""" Jobs that need to be executed. """
for job in filter(
self.needrun,
self.bfs(self.dependencies, *self.targetjobs, stop=self.noneedrun_finished),
):
yield job
@property
def local_needrun_jobs(self):
"""Iterate over all jobs that need to be run and are marked as local."""
return filter(lambda job: job.is_local, self.needrun_jobs)
@property
def finished_jobs(self):
""" Iterate over all jobs that have been finished."""
for job in filter(self.finished, self.bfs(self.dependencies, *self.targetjobs)):
yield job
@property
def ready_jobs(self):
"""Jobs that are ready to execute."""
return self._ready_jobs
def needrun(self, job):
"""Return whether a given job needs to be executed."""
return job in self._needrun
def priority(self, job):
"""Return priority of given job."""
return self._priority[job]
def noneedrun_finished(self, job):
"""
Return whether a given job is finished or was not
required to run at all.
"""
return not self.needrun(job) or self.finished(job)
def reason(self, job):
""" Return the reason of the job execution. """
return self._reason[job]
def finished(self, job):
""" Return whether a job is finished. """
return job in self._finished
def dynamic(self, job):
"""
Return whether a job is dynamic (i.e. it is only a placeholder
for those that are created after the job with dynamic output has
        finished).
"""
if job.is_group():
for j in job:
if j in self._dynamic:
return True
else:
return job in self._dynamic
def requested_files(self, job):
"""Return the files a job requests."""
        # union of all requested file sets (set(*...) fails with more than one depending job)
        return set(chain(*self.depending[job].values()))
@property
def incomplete_files(self):
"""Return list of incomplete files."""
return list(
chain(
*(
job.output
for job in filter(
self.workflow.persistence.incomplete,
filterfalse(self.needrun, self.jobs),
)
)
)
)
@property
def newversion_files(self):
"""Return list of files where the current version is newer than the
recorded version.
"""
return list(
chain(
*(
job.output
for job in filter(self.workflow.persistence.newversion, self.jobs)
)
)
)
def missing_temp(self, job):
"""
Return whether a temp file that is input of the given job is missing.
"""
for job_, files in self.depending[job].items():
if self.needrun(job_) and any(not f.exists for f in files):
return True
return False
def check_and_touch_output(
self,
job,
wait=3,
ignore_missing_output=False,
no_touch=False,
force_stay_on_remote=False,
):
""" Raise exception if output files of job are missing. """
expanded_output = [job.shadowed_path(path) for path in job.expanded_output]
if job.benchmark:
expanded_output.append(job.benchmark)
if not ignore_missing_output:
try:
wait_for_files(
expanded_output,
latency_wait=wait,
force_stay_on_remote=force_stay_on_remote,
ignore_pipe=True,
)
except IOError as e:
raise MissingOutputException(
str(e) + "\nThis might be due to "
"filesystem latency. If that is the case, consider to increase the "
"wait time with --latency-wait.",
rule=job.rule,
)
# Ensure that outputs are of the correct type (those flagged with directory()
# are directories and not files and vice versa).
for f in expanded_output:
if (f.is_directory and not os.path.isdir(f)) or (
os.path.isdir(f) and not f.is_directory
):
raise ImproperOutputException(job.rule, [f])
# It is possible, due to archive expansion or cluster clock skew, that
# the files appear older than the input. But we know they must be new,
# so touch them to update timestamps. This also serves to touch outputs
# when using the --touch flag.
# Note that if the input files somehow have a future date then this will
# not currently be spotted and the job will always be re-run.
if not no_touch:
for f in expanded_output:
# This won't create normal files if missing, but will create
# the flag file for directories.
if f.exists_local:
f.touch()
def unshadow_output(self, job, only_log=False):
""" Move files from shadow directory to real output paths. """
if not job.shadow_dir or not job.expanded_output:
return
files = job.log if only_log else chain(job.expanded_output, job.log)
for real_output in files:
shadow_output = job.shadowed_path(real_output).file
# Remake absolute symlinks as relative
if os.path.islink(shadow_output):
dest = os.readlink(shadow_output)
if os.path.isabs(dest):
rel_dest = os.path.relpath(dest, job.shadow_dir)
os.remove(shadow_output)
os.symlink(rel_dest, shadow_output)
if os.path.realpath(shadow_output) == os.path.realpath(real_output):
continue
logger.debug(
"Moving shadow output {} to destination {}".format(
shadow_output, real_output
)
)
shutil.move(shadow_output, real_output)
shutil.rmtree(job.shadow_dir)
def check_periodic_wildcards(self, job):
"""Raise an exception if a wildcard of the given job appears to be periodic,
indicating a cyclic dependency."""
for wildcard, value in job.wildcards_dict.items():
periodic_substring = self.periodic_wildcard_detector.is_periodic(value)
if periodic_substring is not None:
raise PeriodicWildcardError(
"The value {} in wildcard {} is periodically repeated ({}). "
"This would lead to an infinite recursion. "
"To avoid this, e.g. restrict the wildcards in this rule to certain values.".format(
periodic_substring, wildcard, value
),
rule=job.rule,
)
def handle_protected(self, job):
""" Write-protect output files that are marked with protected(). """
for f in job.expanded_output:
if f in job.protected_output:
logger.info("Write-protecting output file {}.".format(f))
f.protect()
def handle_touch(self, job):
""" Touches those output files that are marked for touching. """
for f in job.expanded_output:
if f in job.touch_output:
f = job.shadowed_path(f)
logger.info("Touching output file {}.".format(f))
f.touch_or_create()
assert os.path.exists(f)
def temp_input(self, job):
for job_, files in self.dependencies[job].items():
for f in filter(job_.temp_output.__contains__, files):
yield f
def temp_size(self, job):
"""Return the total size of temporary input files of the job.
If none, return 0.
"""
return sum(f.size for f in self.temp_input(job))
def handle_temp(self, job):
""" Remove temp files if they are no longer needed. Update temp_mtimes. """
if self.notemp:
return
is_temp = lambda f: is_flagged(f, "temp")
# handle temp input
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
# temp input
for job_, files in self.dependencies[job].items():
tempfiles = set(f for f in job_.expanded_output if is_temp(f))
yield from filterfalse(partial(needed, job_), tempfiles & files)
# temp output
if not job.dynamic_output and (
job not in self.targetjobs or job.rule.name == self.workflow.first_rule
):
tempfiles = (
f
for f in job.expanded_output
if is_temp(f) and f not in self.targetfiles
)
yield from filterfalse(partial(needed, job), tempfiles)
for f in unneeded_files():
logger.info("Removing temporary output file {}.".format(f))
f.remove(remove_non_empty_dir=True)
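    # Illustrative scenario (hypothetical rules): if rule sort produced
    # temp("sorted.bam") and its only consumer has just finished, the file is no
    # longer needed by any unfinished needrun job and gets removed here; with
    # notemp (--notemp) the method returns immediately.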
def handle_log(self, job, upload_remote=True):
for f in job.log:
if not f.exists_local:
# If log file was not created during job, create an empty one.
f.touch_or_create()
if upload_remote and f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
def handle_remote(self, job, upload=True):
""" Remove local files if they are no longer needed and upload. """
if upload:
# handle output files
files = list(job.expanded_output)
if job.benchmark:
files.append(job.benchmark)
for f in files:
if f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
remote_mtime = f.mtime
# immediately force local mtime to match remote,
# since conversions from S3 headers are not 100% reliable
# without this, newness comparisons may fail down the line
f.touch(times=(remote_mtime, remote_mtime))
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
if not self.keep_remote_local:
# handle input files
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
putative = (
lambda f: f.is_remote
and not f.protected
and not f.should_keep_local
)
generated_input = set()
for job_, files in self.dependencies[job].items():
generated_input |= files
for f in filter(putative, files):
if not needed(job_, f):
yield f
for f, f_ in zip(job.output, job.rule.output):
if putative(f) and not needed(job, f) and not f in self.targetfiles:
if f in job.dynamic_output:
for f_ in job.expand_dynamic(f_):
yield f_
else:
yield f
for f in filter(putative, job.input):
# TODO what about remote inputs that are used by multiple jobs?
if f not in generated_input:
yield f
for f in unneeded_files():
if f.exists_local:
logger.info("Removing local output file: {}".format(f))
f.remove()
def jobid(self, job):
"""Return job id of given job."""
if job.is_group():
return job.jobid
else:
return self._jobid[job]
def update(
self, jobs, file=None, visited=None, skip_until_dynamic=False, progress=False
):
""" Update the DAG by adding given jobs and their dependencies. """
if visited is None:
visited = set()
producer = None
exceptions = list()
jobs = sorted(jobs, reverse=not self.ignore_ambiguity)
cycles = list()
for job in jobs:
logger.dag_debug(dict(status="candidate", job=job))
if file in job.input:
cycles.append(job)
continue
if job in visited:
cycles.append(job)
continue
try:
self.check_periodic_wildcards(job)
self.update_(
job,
visited=set(visited),
skip_until_dynamic=skip_until_dynamic,
progress=progress,
)
# TODO this might fail if a rule discarded here is needed
# elsewhere
if producer:
if job < producer or self.ignore_ambiguity:
break
elif producer is not None:
raise AmbiguousRuleException(file, job, producer)
producer = job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
exceptions.append(ex)
except RecursionError as e:
raise WorkflowError(
e,
"If building the DAG exceeds the recursion limit, "
"this is likely due to a cyclic dependency."
"E.g. you might have a sequence of rules that "
"can generate their own input. Try to make "
"the output files more specific. "
"A common pattern is to have different prefixes "
"in the output files of different rules."
                    # parenthesized so the explanation above is kept even when no file is given
                    + ("\nProblematic file pattern: {}".format(file) if file else ""),
)
if producer is None:
if cycles:
job = cycles[0]
raise CyclicGraphException(job.rule, file, rule=job.rule)
if len(exceptions) > 1:
raise WorkflowError(*exceptions)
elif len(exceptions) == 1:
raise exceptions[0]
else:
logger.dag_debug(dict(status="selected", job=producer))
logger.dag_debug(
dict(
file=file,
msg="Producer found, hence exceptions are ignored.",
exception=WorkflowError(*exceptions),
)
)
n = len(self.dependencies)
if progress and n % 1000 == 0 and n and self._progress != n:
logger.info("Processed {} potential jobs.".format(n))
self._progress = n
return producer
def update_(self, job, visited=None, skip_until_dynamic=False, progress=False):
""" Update the DAG by adding the given job and its dependencies. """
if job in self.dependencies:
return
if visited is None:
visited = set()
visited.add(job)
dependencies = self.dependencies[job]
potential_dependencies = self.collect_potential_dependencies(job)
skip_until_dynamic = skip_until_dynamic and not job.dynamic_output
missing_input = set()
producer = dict()
exceptions = dict()
for file, jobs in potential_dependencies.items():
# If possible, obtain inventory information starting from
# given file and store it in the IOCache.
# This should provide faster access to existence and mtime information
# than querying file by file. If the file type does not support inventory
# information, this call is a no-op.
file.inventory()
if not jobs:
# no producing job found
if not file.exists:
# file not found, hence missing input
missing_input.add(file)
# file found, no problem
continue
try:
selected_job = self.update(
jobs,
file=file,
visited=visited,
skip_until_dynamic=skip_until_dynamic or file in job.dynamic_input,
progress=progress,
)
producer[file] = selected_job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
if not file.exists:
self.delete_job(job, recursive=False) # delete job from tree
raise ex
else:
logger.dag_debug(
dict(
file=file,
msg="No producers found, but file is present on disk.",
exception=ex,
)
)
for file, job_ in producer.items():
dependencies[job_].add(file)
self.depending[job_][job].add(file)
if self.is_batch_rule(job.rule) and self.batch.is_final:
# For the final batch, ensure that all input files from
# previous batches are present on disk.
if any(
f for f in job.input if f not in potential_dependencies and not f.exists
):
raise WorkflowError(
"Unable to execute batch {} because not all previous batches "
"have been completed before or files have been deleted.".format(
self.batch
)
)
if missing_input:
self.delete_job(job, recursive=False) # delete job from tree
raise MissingInputException(job.rule, missing_input)
if skip_until_dynamic:
self._dynamic.add(job)
def update_needrun(self):
""" Update the information whether a job needs to be executed. """
output_mintime = dict()
def update_output_mintime(job):
try:
return output_mintime[job]
except KeyError:
for job_ in chain([job], self.depending[job]):
try:
t = output_mintime[job_]
except KeyError:
t = job_.output_mintime
if t is not None:
output_mintime[job] = t
return
output_mintime[job] = None
def update_needrun(job):
reason = self.reason(job)
noinitreason = not reason
updated_subworkflow_input = self.updated_subworkflow_files.intersection(
job.input
)
if (
job not in self.omitforce
and job.rule in self.forcerules
or not self.forcefiles.isdisjoint(job.output)
):
reason.forced = True
elif updated_subworkflow_input:
reason.updated_input.update(updated_subworkflow_input)
elif job in self.targetjobs:
# TODO find a way to handle added/removed input files here?
if not job.output and not job.benchmark:
if job.input:
if job.rule.norun:
reason.updated_input_run.update(
[f for f in job.input if not f.exists]
)
else:
reason.nooutput = True
else:
reason.noio = True
else:
if job.rule in self.targetrules:
missing_output = job.missing_output()
else:
missing_output = job.missing_output(
requested=set(chain(*self.depending[job].values()))
| self.targetfiles
)
reason.missing_output.update(missing_output)
if not reason:
output_mintime_ = output_mintime.get(job)
if output_mintime_:
updated_input = [
f for f in job.input if f.exists and f.is_newer(output_mintime_)
]
reason.updated_input.update(updated_input)
if noinitreason and reason:
reason.derived = False
reason = self.reason
_needrun = self._needrun
dependencies = self.dependencies
depending = self.depending
_needrun.clear()
candidates = list(self.jobs)
# Update the output mintime of all jobs.
# We traverse them in BFS (level order) starting from target jobs.
# Then, we check output mintime of job itself and all direct descendants,
# which have already been visited in the level before.
# This way, we achieve a linear runtime.
for job in candidates:
update_output_mintime(job)
# update prior reason for all candidate jobs
for job in candidates:
update_needrun(job)
queue = list(filter(reason, candidates))
visited = set(queue)
while queue:
job = queue.pop(0)
_needrun.add(job)
for job_, files in dependencies[job].items():
missing_output = job_.missing_output(requested=files)
reason(job_).missing_output.update(missing_output)
if missing_output and not job_ in visited:
visited.add(job_)
queue.append(job_)
for job_, files in depending[job].items():
if job_ in candidates:
reason(job_).updated_input_run.update(files)
if not job_ in visited:
visited.add(job_)
queue.append(job_)
# update len including finished jobs (because they have already increased the job counter)
self._len = len(self._finished | self._needrun)
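    # Rough outline of the pass above: every candidate job first receives an
    # initial reason (forced, missing output, updated input, ...); reasons are
    # then propagated breadth-first in both directions -- to producers whose
    # requested output is still missing and to consumers whose inputs will be
    # regenerated -- until no new jobs are added to the needrun set.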
def in_until(self, job):
"""Return whether given job has been specified via --until."""
return job.rule.name in self.untilrules or not self.untilfiles.isdisjoint(
job.output
)
def in_omitfrom(self, job):
"""Return whether given job has been specified via --omit-from."""
return job.rule.name in self.omitrules or not self.omitfiles.isdisjoint(
job.output
)
def until_jobs(self):
"""Returns a generator of jobs specified by untiljobs."""
return (job for job in self.jobs if self.in_until(job))
def omitfrom_jobs(self):
"""Returns a generator of jobs specified by omitfromjobs."""
return (job for job in self.jobs if self.in_omitfrom(job))
def downstream_of_omitfrom(self):
"""Returns the downstream of --omit-from rules or files and themselves."""
return self.bfs(self.depending, *self.omitfrom_jobs())
def delete_omitfrom_jobs(self):
"""Removes jobs downstream of jobs specified by --omit-from."""
if not self.omitrules and not self.omitfiles:
return
downstream_jobs = list(
self.downstream_of_omitfrom()
) # need to cast as list before deleting jobs
for job in downstream_jobs:
self.delete_job(job, recursive=False, add_dependencies=True)
def set_until_jobs(self):
"""Removes jobs downstream of jobs specified by --omit-from."""
if not self.untilrules and not self.untilfiles:
return
self.targetjobs = set(self.until_jobs())
def update_priority(self):
""" Update job priorities. """
prioritized = (
lambda job: job.rule in self.priorityrules
or not self.priorityfiles.isdisjoint(job.output)
)
for job in self.needrun_jobs:
self._priority[job] = job.rule.priority
for job in self.bfs(
self.dependencies,
*filter(prioritized, self.needrun_jobs),
stop=self.noneedrun_finished,
):
self._priority[job] = Job.HIGHEST_PRIORITY
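    # Note on the traversal above: every job reachable through the dependencies
    # of a prioritized job inherits Job.HIGHEST_PRIORITY, so the whole upstream
    # chain of a prioritized target is preferred by the scheduler.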
def update_groups(self):
groups = dict()
for job in self.needrun_jobs:
if job.group is None:
continue
stop = lambda j: j.group != job.group
# BFS into depending needrun jobs if in same group
# Note: never go up here (into depending), because it may contain
# jobs that have been sorted out due to e.g. ruleorder.
group = self.group_job_factory.new(
job.group,
(
job
for job in self.bfs(self.dependencies, job, stop=stop)
if self.needrun(job)
),
)
# merge with previously determined groups if present
for j in group:
if j in groups:
other = groups[j]
other.merge(group)
group = other
# update assignment
for j in group:
if j not in groups:
groups[j] = group
self._group = groups
self._update_group_components()
def _update_group_components(self):
# span connected components if requested
for groupid, conn_components in groupby(
set(self._group.values()), key=lambda group: group.groupid
):
n_components = self.workflow.group_components.get(groupid, 1)
if n_components > 1:
for chunk in group_into_chunks(n_components, conn_components):
if len(chunk) > 1:
primary = chunk[0]
for secondary in chunk[1:]:
primary.merge(secondary)
for j in primary:
self._group[j] = primary
def update_ready(self, jobs=None):
"""Update information whether a job is ready to execute.
Given jobs must be needrun jobs!
"""
if jobs is None:
jobs = self.needrun_jobs
candidate_groups = set()
for job in jobs:
if not self.finished(job) and self._ready(job):
if job.group is None:
self._ready_jobs.add(job)
else:
group = self._group[job]
group.finalize()
candidate_groups.add(group)
self._ready_jobs.update(
group
for group in candidate_groups
if all(self._ready(job) for job in group)
)
def get_jobs_or_groups(self):
visited_groups = set()
for job in self.jobs:
if job.group is None:
yield job
else:
group = self._group[job]
if group in visited_groups:
continue
visited_groups.add(group)
yield group
def close_remote_objects(self):
"""Close all remote objects."""
for job in self.jobs:
if not self.needrun(job):
job.close_remote()
def postprocess(self):
"""Postprocess the DAG. This has to be invoked after any change to the
DAG topology."""
self.update_jobids()
self.update_needrun()
self.update_priority()
self.handle_pipes()
self.update_groups()
self.update_ready()
self.close_remote_objects()
self.update_checkpoint_outputs()
def handle_pipes(self):
"""Use pipes to determine job groups. Check if every pipe has exactly
one consumer"""
for job in self.needrun_jobs:
candidate_groups = set()
if job.group is not None:
candidate_groups.add(job.group)
all_depending = set()
has_pipe = False
for f in job.output:
if is_flagged(f, "pipe"):
if job.is_run:
raise WorkflowError(
"Rule defines pipe output but "
"uses a 'run' directive. This is "
"not possible for technical "
"reasons. Consider using 'shell' or "
"'script'.",
rule=job.rule,
)
has_pipe = True
depending = [
j for j, files in self.depending[job].items() if f in files
]
if len(depending) > 1:
raise WorkflowError(
"Output file {} is marked as pipe "
"but more than one job depends on "
"it. Make sure that any pipe "
"output is only consumed by one "
"job".format(f),
rule=job.rule,
)
elif len(depending) == 0:
raise WorkflowError(
"Output file {} is marked as pipe "
"but it has no consumer. This is "
"invalid because it can lead to "
"a dead lock.".format(f),
rule=job.rule,
)
depending = depending[0]
if depending.is_run:
raise WorkflowError(
"Rule consumes pipe input but "
"uses a 'run' directive. This is "
"not possible for technical "
"reasons. Consider using 'shell' or "
"'script'.",
rule=job.rule,
)
all_depending.add(depending)
if depending.group is not None:
candidate_groups.add(depending.group)
if not has_pipe:
continue
if len(candidate_groups) > 1:
raise WorkflowError(
"An output file is marked as "
"pipe, but consuming jobs "
"are part of conflicting "
"groups.",
rule=job.rule,
)
elif candidate_groups:
# extend the candidate group to all involved jobs
group = candidate_groups.pop()
else:
# generate a random unique group name
group = str(uuid.uuid4())
job.group = group
for j in all_depending:
j.group = group
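    # Example of the grouping done above (hypothetical rules): if rule a writes
    # pipe("a.fifo") and rule b is its single consumer, both jobs end up in the
    # same group (either an existing group name or a fresh uuid4 string), so
    # they can be scheduled together and the pipe actually streams.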
def _ready(self, job):
"""Return whether the given job is ready to execute."""
group = self._group.get(job, None)
if group is None:
is_external_needrun_dep = self.needrun
else:
def is_external_needrun_dep(j):
g = self._group.get(j, None)
return self.needrun(j) and (g is None or g != group)
return self._finished.issuperset(
filter(is_external_needrun_dep, self.dependencies[job])
)
def update_checkpoint_dependencies(self, jobs=None):
"""Update dependencies of checkpoints."""
updated = False
self.update_checkpoint_outputs()
if jobs is None:
jobs = [job for job in self.jobs if not self.needrun(job)]
for job in jobs:
if job.is_checkpoint:
depending = list(self.depending[job])
# re-evaluate depending jobs, replace and update DAG
for j in depending:
logger.info("Updating job {} ({}).".format(self.jobid(j), j))
newjob = j.updated()
self.replace_job(j, newjob, recursive=False)
updated = True
if updated:
# This has to be done for each checkpoint,
# otherwise, jobs may be missing in the end.
self.postprocess()
return updated
def finish(self, job, update_dynamic=True):
"""Finish a given job (e.g. remove from ready jobs, mark depending jobs
as ready)."""
try:
self._ready_jobs.remove(job)
except KeyError:
pass
if job.is_group():
jobs = job
else:
jobs = [job]
self._finished.update(jobs)
updated_dag = False
if update_dynamic:
updated_dag = self.update_checkpoint_dependencies(jobs)
# mark depending jobs as ready
# skip jobs that are marked as until jobs
self.update_ready(
j
for job in jobs
for j in self.depending[job]
if not self.in_until(job) and self.needrun(j)
)
for job in jobs:
if update_dynamic and job.dynamic_output:
logger.info("Dynamically updating jobs")
newjob = self.update_dynamic(job)
if newjob:
# simulate that this job ran and was finished before
self.omitforce.add(newjob)
self._needrun.add(newjob)
self._finished.add(newjob)
updated_dag = True
self.postprocess()
self.handle_protected(newjob)
self.handle_touch(newjob)
if updated_dag:
# We might have new jobs, so we need to ensure that all conda envs
# and singularity images are set up.
if self.workflow.use_singularity:
self.pull_container_imgs()
if self.workflow.use_conda:
self.create_conda_envs()
def new_job(self, rule, targetfile=None, format_wildcards=None):
"""Create new job for given rule and (optional) targetfile.
This will reuse existing jobs with the same wildcards."""
key = (rule, targetfile)
if key in self.job_cache:
assert targetfile is not None
return self.job_cache[key]
wildcards_dict = rule.get_wildcards(targetfile)
job = self.job_factory.new(
rule,
self,
wildcards_dict=wildcards_dict,
format_wildcards=format_wildcards,
targetfile=targetfile,
)
self.cache_job(job)
return job
def cache_job(self, job):
for f in job.products:
self.job_cache[(job.rule, f)] = job
def update_dynamic(self, job):
"""Update the DAG by evaluating the output of the given job that
contains dynamic output files."""
dynamic_wildcards = job.dynamic_wildcards
if not dynamic_wildcards:
# this happens e.g. in dryrun if output is not yet present
return
depending = list(
filter(lambda job_: not self.finished(job_), self.bfs(self.depending, job))
)
newrule, non_dynamic_wildcards = job.rule.dynamic_branch(
dynamic_wildcards, input=False
)
self.specialize_rule(job.rule, newrule)
# no targetfile needed for job
newjob = self.new_job(newrule, format_wildcards=non_dynamic_wildcards)
self.replace_job(job, newjob)
for job_ in depending:
needs_update = any(
f.get_wildcard_names() & dynamic_wildcards.keys()
for f in job_.rule.dynamic_input
)
if needs_update:
newrule_ = job_.rule.dynamic_branch(dynamic_wildcards)
if newrule_ is not None:
self.specialize_rule(job_.rule, newrule_)
if not self.dynamic(job_):
logger.debug("Updating job {}.".format(job_))
newjob_ = self.new_job(
newrule_, targetfile=job_.output[0] if job_.output else None
)
unexpected_output = self.reason(
job_
).missing_output.intersection(newjob.existing_output)
if unexpected_output:
logger.warning(
"Warning: the following output files of rule {} were not "
"present when the DAG was created:\n{}".format(
newjob_.rule, unexpected_output
)
)
self.replace_job(job_, newjob_)
return newjob
def delete_job(self, job, recursive=True, add_dependencies=False):
"""Delete given job from DAG."""
if job in self.targetjobs:
self.targetjobs.remove(job)
if add_dependencies:
for _job in self.dependencies[job]:
self.targetjobs.add(_job)
for job_ in self.depending[job]:
del self.dependencies[job_][job]
del self.depending[job]
for job_ in self.dependencies[job]:
depending = self.depending[job_]
del depending[job]
if not depending and recursive:
self.delete_job(job_)
del self.dependencies[job]
if job in self._needrun:
self._len -= 1
self._needrun.remove(job)
del self._reason[job]
if job in self._finished:
self._finished.remove(job)
if job in self._dynamic:
self._dynamic.remove(job)
if job in self._ready_jobs:
self._ready_jobs.remove(job)
# remove from cache
for f in job.output:
try:
del self.job_cache[(job.rule, f)]
except KeyError:
pass
def replace_job(self, job, newjob, recursive=True):
"""Replace given job with new job."""
add_to_targetjobs = job in self.targetjobs
depending = list(self.depending[job].items())
if self.finished(job):
self._finished.add(newjob)
self.delete_job(job, recursive=recursive)
if add_to_targetjobs:
self.targetjobs.add(newjob)
self.cache_job(newjob)
self.update([newjob])
logger.debug("Replace {} with dynamic branch {}".format(job, newjob))
for job_, files in depending:
# if not job_.dynamic_input:
logger.debug("updating depending job {}".format(job_))
self.dependencies[job_][newjob].update(files)
self.depending[newjob][job_].update(files)
def specialize_rule(self, rule, newrule):
"""Specialize the given rule by inserting newrule into the DAG."""
assert newrule is not None
self.rules.add(newrule)
self.update_output_index()
def is_batch_rule(self, rule):
"""Return True if the underlying rule is to be used for batching the DAG."""
return self.batch is not None and rule.name == self.batch.rulename
def collect_potential_dependencies(self, job):
"""Collect all potential dependencies of a job. These might contain
ambiguities. The keys of the returned dict represent the files to be considered."""
dependencies = defaultdict(list)
# use a set to circumvent multiple jobs for the same file
# if user specified it twice
file2jobs = self.file2jobs
input_files = list(job.unique_input)
if self.is_batch_rule(job.rule):
# only consider the defined partition of the input files
input_batch = self.batch.get_batch(input_files)
if len(input_batch) != len(input_files):
logger.info(
"Considering only batch {} for DAG computation.\n"
"All jobs beyond the batching rule are omitted until the final batch.\n"
"Don't forget to run the other batches too.".format(self.batch)
)
input_files = input_batch
for file in input_files:
# omit the file if it comes from a subworkflow
if file in job.subworkflow_input:
continue
try:
if file in job.dependencies:
jobs = [self.new_job(job.dependencies[file], targetfile=file)]
else:
jobs = file2jobs(file)
dependencies[file].extend(jobs)
except MissingRuleException as ex:
# no dependency found
dependencies[file] = []
return dependencies
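    # Sketch of the returned mapping (hypothetical files): e.g.
    # {"a.txt": [<job of rule_a>], "config.yaml": []} -- an empty list means no
    # producing rule was found, and update_() then requires the file to already
    # exist on disk.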
def bfs(self, direction, *jobs, stop=lambda job: False):
"""Perform a breadth-first traversal of the DAG."""
queue = list(jobs)
visited = set(queue)
while queue:
job = queue.pop(0)
if stop(job):
# stop criterion reached for this node
continue
yield job
for job_, _ in direction[job].items():
if not job_ in visited:
queue.append(job_)
visited.add(job_)
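    # Minimal usage sketch: list(self.bfs(self.dependencies, job)) yields job
    # itself followed by its transitive dependencies in breadth-first order;
    # passing self.depending instead walks towards the consumers of its output.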
def level_bfs(self, direction, *jobs, stop=lambda job: False):
"""Perform a breadth-first traversal of the DAG, but also yield the
level together with each job."""
queue = [(job, 0) for job in jobs]
visited = set(jobs)
while queue:
job, level = queue.pop(0)
if stop(job):
# stop criterion reached for this node
continue
yield level, job
level += 1
for job_, _ in direction[job].items():
if not job_ in visited:
queue.append((job_, level))
visited.add(job_)
def dfs(self, direction, *jobs, stop=lambda job: False, post=True):
"""Perform depth-first traversal of the DAG."""
visited = set()
def _dfs(job):
"""Inner function for DFS traversal."""
if stop(job):
return
if not post:
yield job
for job_ in direction[job]:
if not job_ in visited:
visited.add(job_)
for j in _dfs(job_):
yield j
if post:
yield job
for job in jobs:
            # delegate to the local _dfs generator defined above
            for job_ in _dfs(job):
yield job_
def new_wildcards(self, job):
"""Return wildcards that are newly introduced in this job,
compared to its ancestors."""
new_wildcards = set(job.wildcards.items())
for job_ in self.dependencies[job]:
if not new_wildcards:
return set()
for wildcard in job_.wildcards.items():
new_wildcards.discard(wildcard)
return new_wildcards
def rule2job(self, targetrule):
"""Generate a new job from a given rule."""
if targetrule.has_wildcards():
raise WorkflowError(
"Target rules may not contain wildcards. Please specify concrete files or a rule without wildcards."
)
return self.new_job(targetrule)
def file2jobs(self, targetfile):
rules = self.output_index.match(targetfile)
jobs = []
exceptions = list()
for rule in rules:
if rule.is_producer(targetfile):
try:
jobs.append(self.new_job(rule, targetfile=targetfile))
except InputFunctionException as e:
exceptions.append(e)
if not jobs:
if exceptions:
raise exceptions[0]
raise MissingRuleException(targetfile)
return jobs
def rule_dot2(self):
dag = defaultdict(list)
visited = set()
preselect = set()
def preselect_parents(job):
for parent in self.depending[job]:
if parent in preselect:
continue
preselect.add(parent)
preselect_parents(parent)
def build_ruledag(job, key=lambda job: job.rule.name):
if job in visited:
return
visited.add(job)
deps = sorted(self.dependencies[job], key=key)
deps = [
(
group[0]
if preselect.isdisjoint(group)
else preselect.intersection(group).pop()
)
for group in (list(g) for _, g in groupby(deps, key))
]
dag[job].extend(deps)
preselect_parents(job)
for dep in deps:
build_ruledag(dep)
for job in self.targetjobs:
build_ruledag(job)
        # _dot() no longer accepts print_wildcards/print_types/dag kwargs; pass the rule DAG directly.
        return self._dot(dag)
def rule_dot(self):
graph = defaultdict(set)
for job in self.jobs:
graph[job.rule].update(dep.rule for dep in self.dependencies[job])
return self._dot(graph)
def dot(self):
def node2style(job):
if not self.needrun(job):
return "rounded,dashed"
if self.dynamic(job) or job.dynamic_input:
return "rounded,dotted"
return "rounded"
def format_wildcard(wildcard):
name, value = wildcard
if DYNAMIC_FILL in value:
value = "..."
return "{}: {}".format(name, value)
node2rule = lambda job: job.rule
node2label = lambda job: "\\n".join(
chain(
[job.rule.name], sorted(map(format_wildcard, self.new_wildcards(job)))
)
)
dag = {job: self.dependencies[job] for job in self.jobs}
return self._dot(
dag, node2rule=node2rule, node2style=node2style, node2label=node2label
)
def _dot(
self,
graph,
node2rule=lambda node: node,
node2style=lambda node: "rounded",
node2label=lambda node: node,
):
# color rules
huefactor = 2 / (3 * len(self.rules))
rulecolor = {
rule: "{:.2f} 0.6 0.85".format(i * huefactor)
for i, rule in enumerate(self.rules)
}
# markup
node_markup = '\t{}[label = "{}", color = "{}", style="{}"];'.format
edge_markup = "\t{} -> {}".format
# node ids
ids = {node: i for i, node in enumerate(graph)}
# calculate nodes
nodes = [
node_markup(
ids[node],
node2label(node),
rulecolor[node2rule(node)],
node2style(node),
)
for node in graph
]
# calculate edges
edges = [
edge_markup(ids[dep], ids[node])
for node, deps in graph.items()
for dep in deps
]
return textwrap.dedent(
"""\
digraph snakemake_dag {{
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, \
fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
{items}
}}\
"""
).format(items="\n".join(nodes + edges))
def filegraph_dot(
self,
node2rule=lambda node: node,
node2style=lambda node: "rounded",
node2label=lambda node: node,
):
# NOTE: This is code from the rule_dot method.
# This method could be split like there as well, however,
# it cannot easily reuse the _dot method due to the different node type
graph = defaultdict(set)
for job in self.jobs:
graph[job.rule].update(dep.rule for dep in self.dependencies[job])
# node ids
ids = {node: i for i, node in enumerate(graph)}
# Compute colors for rules
def hsv_to_htmlhexrgb(h, s, v):
"""Convert hsv colors to hex-encoded rgb colors usable by html."""
import colorsys
hex_r, hex_g, hex_b = (round(255 * x) for x in colorsys.hsv_to_rgb(h, s, v))
return "#{hex_r:0>2X}{hex_g:0>2X}{hex_b:0>2X}".format(
hex_r=hex_r, hex_g=hex_g, hex_b=hex_b
)
huefactor = 2 / (3 * len(self.rules))
rulecolor = {
rule: hsv_to_htmlhexrgb(i * huefactor, 0.6, 0.85)
for i, rule in enumerate(self.rules)
}
def resolve_input_functions(input_files):
"""Iterate over all input files and replace input functions
with a fixed string.
"""
files = []
for f in input_files:
if callable(f):
files.append("<input function>")
# NOTE: This is a workaround. It would be more informative
# to show the code of the input function here (if it is
# short enough). This cannot be easily done with the inspect
# module, since the line numbers in the Snakefile do not
# behave as expected. One (complicated) solution for this
# would be to find the Snakefile and directly extract the
# code of the function.
else:
files.append(repr(f).strip("'"))
return files
def html_node(node_id, node, color):
"""Assemble a html style node for graphviz"""
input_files = resolve_input_functions(node._input)
output_files = [repr(f).strip("'") for f in node._output]
input_header = (
'<b><font point-size="14">↪ input</font></b>'
if input_files
else ""
)
output_header = (
'<b><font point-size="14">output →</font></b>'
if output_files
else ""
)
html_node = [
'{node_id} [ shape=none, margin=0, label=<<table border="2" color="{color}" cellspacing="3" cellborder="0">'.format(
node_id=node_id, color=color
),
"<tr><td>",
'<b><font point-size="18">{node.name}</font></b>'.format(node=node),
"</td></tr>",
"<hr/>",
'<tr><td align="left"> {input_header} </td></tr>'.format(
input_header=input_header
),
]
for filename in sorted(input_files):
# Escape html relevant chars like '<' and '>' in filenames
# These can be added by input functions etc. and cannot be
# displayed in graphviz HTML nodes.
in_file = html.escape(filename)
html_node.extend(
[
"<tr>",
'<td align="left"><font face="monospace">{in_file}</font></td>'.format(
in_file=in_file
),
"</tr>",
]
)
html_node.append("<hr/>")
html_node.append(
'<tr><td align="right"> {output_header} </td> </tr>'.format(
output_header=output_header
)
)
for filename in sorted(output_files):
out_file = html.escape(filename)
html_node.extend(
[
"<tr>",
'<td align="left"><font face="monospace">{out_file}</font></td>'
"</tr>".format(out_file=out_file),
]
)
html_node.append("</table>>]")
return "\n".join(html_node)
nodes = [
html_node(ids[node], node, rulecolor[node2rule(node)]) for node in graph
]
# calculate edges
edge_markup = "\t{} -> {}".format
edges = [
            edge_markup(ids[dep], ids[node])
for node, deps in graph.items()
for dep in deps
]
return textwrap.dedent(
"""\
digraph snakemake_dag {{
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, \
fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
{items}
}}\
"""
).format(items="\n".join(nodes + edges))
def summary(self, detailed=False):
if detailed:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tinput-file(s)\tshellcmd\tstatus\tplan"
else:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tstatus\tplan"
for job in self.jobs:
output = job.rule.output if self.dynamic(job) else job.expanded_output
for f in output:
rule = self.workflow.persistence.rule(f)
rule = "-" if rule is None else rule
version = self.workflow.persistence.version(f)
version = "-" if version is None else str(version)
date = time.ctime(f.mtime) if f.exists else "-"
pending = "update pending" if self.reason(job) else "no update"
log = self.workflow.persistence.log(f)
log = "-" if log is None else ",".join(log)
input = self.workflow.persistence.input(f)
input = "-" if input is None else ",".join(input)
shellcmd = self.workflow.persistence.shellcmd(f)
shellcmd = "-" if shellcmd is None else shellcmd
# remove new line characters, leading and trailing whitespace
shellcmd = shellcmd.strip().replace("\n", "; ")
status = "ok"
if not f.exists:
status = "missing"
elif self.reason(job).updated_input:
status = "updated input files"
elif self.workflow.persistence.version_changed(job, file=f):
status = "version changed to {}".format(job.rule.version)
elif self.workflow.persistence.code_changed(job, file=f):
status = "rule implementation changed"
elif self.workflow.persistence.input_changed(job, file=f):
status = "set of input files changed"
elif self.workflow.persistence.params_changed(job, file=f):
status = "params changed"
if detailed:
yield "\t".join(
(f, date, rule, version, log, input, shellcmd, status, pending)
)
else:
yield "\t".join((f, date, rule, version, log, status, pending))
def archive(self, path):
"""Archives workflow such that it can be re-run on a different system.
Archiving includes git versioned files (i.e. Snakefiles, config files, ...),
ancestral input files and conda environments.
"""
if path.endswith(".tar"):
mode = "x"
elif path.endswith("tar.bz2"):
mode = "x:bz2"
elif path.endswith("tar.xz"):
mode = "x:xz"
elif path.endswith("tar.gz"):
mode = "x:gz"
else:
raise WorkflowError(
"Unsupported archive format "
"(supported: .tar, .tar.gz, .tar.bz2, .tar.xz)"
)
if os.path.exists(path):
raise WorkflowError("Archive already exists:\n" + path)
self.create_conda_envs(forceall=True)
try:
workdir = Path(os.path.abspath(os.getcwd()))
with tarfile.open(path, mode=mode, dereference=True) as archive:
archived = set()
def add(path):
if workdir not in Path(os.path.abspath(path)).parents:
logger.warning(
"Path {} cannot be archived: "
"not within working directory.".format(path)
)
else:
f = os.path.relpath(path)
if f not in archived:
archive.add(f)
archived.add(f)
logger.info("archived " + f)
logger.info(
"Archiving snakefiles, scripts and files under "
"version control..."
)
for f in self.workflow.get_sources():
add(f)
logger.info("Archiving external input files...")
for job in self.jobs:
# input files
for f in job.input:
if not any(
f in files for files in self.dependencies[job].values()
):
# this is an input file that is not created by any job
add(f)
logger.info("Archiving conda environments...")
envs = set()
for job in self.jobs:
if job.conda_env_file:
env_archive = job.archive_conda_env()
envs.add(env_archive)
for env in envs:
add(env)
except (Exception, BaseException) as e:
os.remove(path)
raise e
def clean(self, only_temp=False, dryrun=False):
"""Removes files generated by the workflow."""
for job in self.jobs:
for f in job.output:
if not only_temp or is_flagged(f, "temp"):
# The reason for the second check is that dangling
# symlinks fail f.exists.
if f.exists or os.path.islink(f):
if f.protected:
logger.error("Skipping write-protected file {}.".format(f))
else:
msg = "Deleting {}" if not dryrun else "Would delete {}"
logger.info(msg.format(f))
if not dryrun:
# Remove non-empty dirs if flagged as temp()
f.remove(remove_non_empty_dir=only_temp)
def list_untracked(self):
"""List files in the workdir that are not in the dag."""
used_files = set()
files_in_cwd = set()
for job in self.jobs:
used_files.update(
os.path.relpath(file)
for file in chain(job.local_input, job.local_output, job.log)
)
for root, dirs, files in os.walk(os.getcwd()):
# Ignore hidden files and don't traverse into hidden dirs
files_in_cwd.update(
[
os.path.relpath(os.path.join(root, f))
for f in files
if not f[0] == "."
]
)
dirs[:] = [d for d in dirs if not d[0] == "."]
for f in sorted(list(files_in_cwd - used_files)):
logger.info(f)
def d3dag(self, max_jobs=10000):
def node(job):
jobid = self.jobid(job)
return {
"id": jobid,
"value": {
"jobid": jobid,
"label": job.rule.name,
"rule": job.rule.name,
},
}
def edge(a, b):
return {"u": self.jobid(a), "v": self.jobid(b)}
jobs = list(self.jobs)
if len(jobs) > max_jobs:
logger.info(
"Job-DAG is too large for visualization (>{} jobs).".format(max_jobs)
)
else:
logger.d3dag(
nodes=[node(job) for job in jobs],
edges=[
edge(dep, job)
for job in jobs
for dep in self.dependencies[job]
if self.needrun(dep)
],
)
def stats(self):
rules = Counter()
rules.update(job.rule for job in self.needrun_jobs)
rules.update(job.rule for job in self.finished_jobs)
yield "Job counts:"
yield "\tcount\tjobs"
for rule, count in sorted(rules.most_common(), key=lambda item: item[0].name):
yield "\t{}\t{}".format(count, rule)
yield "\t{}".format(len(self))
def __str__(self):
return self.dot()
def __len__(self):
return self._len
|
archive
|
Archives workflow such that it can be re-run on a different system.
Archiving includes git versioned files (i.e. Snakefiles, config files, ...),
ancestral input files and conda environments.
|
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2015-2019, Johannes Köster"
__email__ = "[email protected]"
__license__ = "MIT"
import html
import os
import shutil
import textwrap
import time
import tarfile
from collections import defaultdict, Counter
from itertools import chain, filterfalse, groupby
from functools import partial
from pathlib import Path
import uuid
import math
from snakemake.io import PeriodicityDetector, wait_for_files, is_flagged
from snakemake.jobs import Reason, JobFactory, GroupJobFactory, Job
from snakemake.exceptions import MissingInputException
from snakemake.exceptions import MissingRuleException, AmbiguousRuleException
from snakemake.exceptions import CyclicGraphException, MissingOutputException
from snakemake.exceptions import IncompleteFilesException, ImproperOutputException
from snakemake.exceptions import PeriodicWildcardError
from snakemake.exceptions import RemoteFileException, WorkflowError, ChildIOException
from snakemake.exceptions import InputFunctionException
from snakemake.logging import logger
from snakemake.common import DYNAMIC_FILL, group_into_chunks
from snakemake.deployment import conda, singularity
from snakemake.output_index import OutputIndex
from snakemake import workflow
class Batch:
"""Definition of a batch for calculating only a partial DAG."""
def __init__(self, rulename: str, idx: int, batches: int):
assert idx <= batches
assert idx > 0
self.rulename = rulename
self.idx = idx
self.batches = batches
def get_batch(self, items: list):
"""Return the defined batch of the given items.
Items are usually input files."""
# make sure that we always consider items in the same order
if len(items) < self.batches:
raise WorkflowError(
"Batching rule {} has less input files than batches. "
"Please choose a smaller number of batches.".format(self.rulename)
)
items = sorted(items)
batch_len = math.floor(len(items) / self.batches)
# self.idx is one-based, hence we have to subtract 1
idx = self.idx - 1
i = idx * batch_len
if self.is_final:
# extend the last batch to cover rest of list
return items[i:]
else:
return items[i : i + batch_len]
@property
def is_final(self):
return self.idx == self.batches
def __str__(self):
return "{}/{} (rule {})".format(self.idx, self.batches, self.rulename)
class DAG:
"""Directed acyclic graph of jobs."""
def __init__(
self,
workflow,
rules=None,
dryrun=False,
targetfiles=None,
targetrules=None,
forceall=False,
forcerules=None,
forcefiles=None,
priorityfiles=None,
priorityrules=None,
untilfiles=None,
untilrules=None,
omitfiles=None,
omitrules=None,
ignore_ambiguity=False,
force_incomplete=False,
ignore_incomplete=False,
notemp=False,
keep_remote_local=False,
batch=None,
):
self.dryrun = dryrun
self.dependencies = defaultdict(partial(defaultdict, set))
self.depending = defaultdict(partial(defaultdict, set))
self._needrun = set()
self._priority = dict()
self._reason = defaultdict(Reason)
self._finished = set()
self._dynamic = set()
self._len = 0
self.workflow = workflow
self.rules = set(rules)
self.ignore_ambiguity = ignore_ambiguity
self.targetfiles = targetfiles
self.targetrules = targetrules
self.priorityfiles = priorityfiles
self.priorityrules = priorityrules
self.targetjobs = set()
self.prioritytargetjobs = set()
self._ready_jobs = set()
self.notemp = notemp
self.keep_remote_local = keep_remote_local
self._jobid = dict()
self.job_cache = dict()
self.conda_envs = dict()
self.container_imgs = dict()
self._progress = 0
self._group = dict()
self.job_factory = JobFactory()
self.group_job_factory = GroupJobFactory()
self.forcerules = set()
self.forcefiles = set()
self.untilrules = set()
self.untilfiles = set()
self.omitrules = set()
self.omitfiles = set()
self.updated_subworkflow_files = set()
if forceall:
self.forcerules.update(self.rules)
elif forcerules:
self.forcerules.update(forcerules)
if forcefiles:
self.forcefiles.update(forcefiles)
if untilrules:
self.untilrules.update(set(rule.name for rule in untilrules))
if untilfiles:
self.untilfiles.update(untilfiles)
if omitrules:
self.omitrules.update(set(rule.name for rule in omitrules))
if omitfiles:
self.omitfiles.update(omitfiles)
self.has_dynamic_rules = any(rule.dynamic_output for rule in self.rules)
self.omitforce = set()
self.batch = batch
if batch is not None and not batch.is_final:
# Since not all input files of a batching rule are considered, we cannot run
# beyond that rule.
# For the final batch, we do not need to omit anything.
self.omitrules.add(batch.rulename)
self.force_incomplete = force_incomplete
self.ignore_incomplete = ignore_incomplete
self.periodic_wildcard_detector = PeriodicityDetector()
self.update_output_index()
def init(self, progress=False):
""" Initialise the DAG. """
for job in map(self.rule2job, self.targetrules):
job = self.update([job], progress=progress)
self.targetjobs.add(job)
for file in self.targetfiles:
job = self.update(self.file2jobs(file), file=file, progress=progress)
self.targetjobs.add(job)
self.cleanup()
self.update_needrun()
self.set_until_jobs()
self.delete_omitfrom_jobs()
self.update_jobids()
self.check_directory_outputs()
# check if remaining jobs are valid
for i, job in enumerate(self.jobs):
job.is_valid()
def check_directory_outputs(self):
"""Check that no output file is contained in a directory output of the same or another rule."""
outputs = sorted(
{
(path(f), job)
for job in self.jobs
for f in job.output
for path in (os.path.abspath, os.path.realpath)
}
)
for i in range(len(outputs) - 1):
(a, job_a), (b, job_b) = outputs[i : i + 2]
try:
common = os.path.commonpath([a, b])
except ValueError:
# commonpath raises error if windows drives are different.
continue
if a != b and common == os.path.commonpath([a]) and job_a != job_b:
raise ChildIOException(parent=outputs[i], child=outputs[i + 1])
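# Sketch of the parent/child test above with made-up paths: for a directory
# output a = "/wd/results" and a file output of another job
# b = "/wd/results/stats.txt" (POSIX paths assumed),
#
#     >>> import os
#     >>> os.path.commonpath(["/wd/results", "/wd/results/stats.txt"])
#     '/wd/results'
#
# equals os.path.commonpath(["/wd/results"]), so b lies inside a and a
# ChildIOException is raised.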
@property
def checkpoint_jobs(self):
for job in self.needrun_jobs:
if job.is_checkpoint:
yield job
def update_checkpoint_outputs(self):
workflow.checkpoints.future_output = set(
f for job in self.checkpoint_jobs for f in job.output
)
def update_jobids(self):
for job in self.jobs:
if job not in self._jobid:
self._jobid[job] = len(self._jobid)
def cleanup_workdir(self):
for io_dir in set(
os.path.dirname(io_file)
for job in self.jobs
for io_file in chain(job.output, job.input)
if not os.path.exists(io_file)
):
if os.path.exists(io_dir) and not len(os.listdir(io_dir)):
os.removedirs(io_dir)
def cleanup(self):
self.job_cache.clear()
final_jobs = set(self.jobs)
todelete = [job for job in self.dependencies if job not in final_jobs]
for job in todelete:
del self.dependencies[job]
try:
del self.depending[job]
except KeyError:
pass
def create_conda_envs(
self, dryrun=False, forceall=False, init_only=False, quiet=False
):
# First deduplicate based on job.conda_env_file
jobs = self.jobs if forceall else self.needrun_jobs
env_set = {
(job.conda_env_file, job.container_img_url)
for job in jobs
if job.conda_env_file
}
# Then based on md5sum values
self.conda_envs = dict()
for (env_file, simg_url) in env_set:
simg = None
if simg_url and self.workflow.use_singularity:
assert (
simg_url in self.container_imgs
), "bug: must first pull singularity images"
simg = self.container_imgs[simg_url]
env = conda.Env(
env_file,
self,
container_img=simg,
cleanup=self.workflow.conda_cleanup_pkgs,
)
self.conda_envs[(env_file, simg_url)] = env
if not init_only:
for env in self.conda_envs.values():
if not dryrun or not quiet:
env.create(dryrun)
def pull_container_imgs(self, dryrun=False, forceall=False, quiet=False):
# First deduplicate based on job.conda_env_file
jobs = self.jobs if forceall else self.needrun_jobs
img_set = {job.container_img_url for job in jobs if job.container_img_url}
for img_url in img_set:
img = singularity.Image(img_url, self)
if not dryrun or not quiet:
img.pull(dryrun)
self.container_imgs[img_url] = img
def update_output_index(self):
"""Update the OutputIndex."""
self.output_index = OutputIndex(self.rules)
def check_incomplete(self):
"""Check if any output files are incomplete. This is done by looking up
markers in the persistence module."""
if not self.ignore_incomplete:
incomplete = self.incomplete_files
if incomplete:
if self.force_incomplete:
logger.debug("Forcing incomplete files:")
logger.debug("\t" + "\n\t".join(incomplete))
self.forcefiles.update(incomplete)
else:
raise IncompleteFilesException(incomplete)
def incomplete_external_jobid(self, job):
"""Return the external jobid of the job if it is marked as incomplete.
Returns None if the job is not incomplete, if no external jobid has been
registered, or if force_incomplete is True.
"""
if self.force_incomplete:
return None
jobids = self.workflow.persistence.external_jobids(job)
if len(jobids) == 1:
return jobids[0]
elif len(jobids) > 1:
raise WorkflowError(
"Multiple different external jobids registered "
"for output files of incomplete job {} ({}). This job "
"cannot be resumed. Execute Snakemake with --rerun-incomplete "
"to fix this issue.".format(job.jobid, jobids)
)
def check_dynamic(self):
"""Check dynamic output and update downstream rules if necessary."""
if self.has_dynamic_rules:
for job in filter(
lambda job: (job.dynamic_output and not self.needrun(job)), self.jobs
):
self.update_dynamic(job)
self.postprocess()
def is_edit_notebook_job(self, job):
return self.workflow.edit_notebook and job.targetfile in self.targetfiles
@property
def dynamic_output_jobs(self):
"""Iterate over all jobs with dynamic output files."""
return (job for job in self.jobs if job.dynamic_output)
@property
def jobs(self):
""" All jobs in the DAG. """
for job in self.bfs(self.dependencies, *self.targetjobs):
yield job
@property
def needrun_jobs(self):
""" Jobs that need to be executed. """
for job in filter(
self.needrun,
self.bfs(self.dependencies, *self.targetjobs, stop=self.noneedrun_finished),
):
yield job
@property
def local_needrun_jobs(self):
"""Iterate over all jobs that need to be run and are marked as local."""
return filter(lambda job: job.is_local, self.needrun_jobs)
@property
def finished_jobs(self):
""" Iterate over all jobs that have been finished."""
for job in filter(self.finished, self.bfs(self.dependencies, *self.targetjobs)):
yield job
@property
def ready_jobs(self):
"""Jobs that are ready to execute."""
return self._ready_jobs
def needrun(self, job):
"""Return whether a given job needs to be executed."""
return job in self._needrun
def priority(self, job):
"""Return priority of given job."""
return self._priority[job]
def noneedrun_finished(self, job):
"""
Return whether a given job is finished or was not
required to run at all.
"""
return not self.needrun(job) or self.finished(job)
def reason(self, job):
""" Return the reason of the job execution. """
return self._reason[job]
def finished(self, job):
""" Return whether a job is finished. """
return job in self._finished
def dynamic(self, job):
"""
Return whether a job is dynamic (i.e. it is only a placeholder
for those that are created after the job with dynamic output has
finished).
"""
if job.is_group():
for j in job:
if j in self._dynamic:
return True
else:
return job in self._dynamic
def requested_files(self, job):
"""Return the files a job requests."""
return set(*self.depending[job].values())
@property
def incomplete_files(self):
"""Return list of incomplete files."""
return list(
chain(
*(
job.output
for job in filter(
self.workflow.persistence.incomplete,
filterfalse(self.needrun, self.jobs),
)
)
)
)
@property
def newversion_files(self):
"""Return list of files where the current version is newer than the
recorded version.
"""
return list(
chain(
*(
job.output
for job in filter(self.workflow.persistence.newversion, self.jobs)
)
)
)
def missing_temp(self, job):
"""
Return whether a temp file that is input of the given job is missing.
"""
for job_, files in self.depending[job].items():
if self.needrun(job_) and any(not f.exists for f in files):
return True
return False
def check_and_touch_output(
self,
job,
wait=3,
ignore_missing_output=False,
no_touch=False,
force_stay_on_remote=False,
):
""" Raise exception if output files of job are missing. """
expanded_output = [job.shadowed_path(path) for path in job.expanded_output]
if job.benchmark:
expanded_output.append(job.benchmark)
if not ignore_missing_output:
try:
wait_for_files(
expanded_output,
latency_wait=wait,
force_stay_on_remote=force_stay_on_remote,
ignore_pipe=True,
)
except IOError as e:
raise MissingOutputException(
str(e) + "\nThis might be due to "
"filesystem latency. If that is the case, consider to increase the "
"wait time with --latency-wait.",
rule=job.rule,
)
# Ensure that outputs are of the correct type (those flagged with directory()
# are directories and not files and vice versa).
for f in expanded_output:
if (f.is_directory and not os.path.isdir(f)) or (
os.path.isdir(f) and not f.is_directory
):
raise ImproperOutputException(job.rule, [f])
# It is possible, due to archive expansion or cluster clock skew, that
# the files appear older than the input. But we know they must be new,
# so touch them to update timestamps. This also serves to touch outputs
# when using the --touch flag.
# Note that if the input files somehow have a future date then this will
# not currently be spotted and the job will always be re-run.
if not no_touch:
for f in expanded_output:
# This won't create normal files if missing, but will create
# the flag file for directories.
if f.exists_local:
f.touch()
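# Worked illustration of the clock-skew note above (numbers are invented): if
# an input was written at t=100 on the submit host but a cluster node with a
# slow clock stamped the output at t=95, the output looks older than its input.
# Touching it with the local clock (now >= 100) restores
# output_mtime >= input_mtime, so the job is not needlessly re-run next time.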
def unshadow_output(self, job, only_log=False):
""" Move files from shadow directory to real output paths. """
if not job.shadow_dir or not job.expanded_output:
return
files = job.log if only_log else chain(job.expanded_output, job.log)
for real_output in files:
shadow_output = job.shadowed_path(real_output).file
# Remake absolute symlinks as relative
if os.path.islink(shadow_output):
dest = os.readlink(shadow_output)
if os.path.isabs(dest):
rel_dest = os.path.relpath(dest, job.shadow_dir)
os.remove(shadow_output)
os.symlink(rel_dest, shadow_output)
if os.path.realpath(shadow_output) == os.path.realpath(real_output):
continue
logger.debug(
"Moving shadow output {} to destination {}".format(
shadow_output, real_output
)
)
shutil.move(shadow_output, real_output)
shutil.rmtree(job.shadow_dir)
def check_periodic_wildcards(self, job):
"""Raise an exception if a wildcard of the given job appears to be periodic,
indicating a cyclic dependency."""
for wildcard, value in job.wildcards_dict.items():
periodic_substring = self.periodic_wildcard_detector.is_periodic(value)
if periodic_substring is not None:
raise PeriodicWildcardError(
"The value {} in wildcard {} is periodically repeated ({}). "
"This would lead to an infinite recursion. "
"To avoid this, e.g. restrict the wildcards in this rule to certain values.".format(
periodic_substring, wildcard, value
),
rule=job.rule,
)
def handle_protected(self, job):
""" Write-protect output files that are marked with protected(). """
for f in job.expanded_output:
if f in job.protected_output:
logger.info("Write-protecting output file {}.".format(f))
f.protect()
def handle_touch(self, job):
""" Touches those output files that are marked for touching. """
for f in job.expanded_output:
if f in job.touch_output:
f = job.shadowed_path(f)
logger.info("Touching output file {}.".format(f))
f.touch_or_create()
assert os.path.exists(f)
def temp_input(self, job):
for job_, files in self.dependencies[job].items():
for f in filter(job_.temp_output.__contains__, files):
yield f
def temp_size(self, job):
"""Return the total size of temporary input files of the job.
If none, return 0.
"""
return sum(f.size for f in self.temp_input(job))
def handle_temp(self, job):
""" Remove temp files if they are no longer needed. Update temp_mtimes. """
if self.notemp:
return
is_temp = lambda f: is_flagged(f, "temp")
# handle temp input
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
# temp input
for job_, files in self.dependencies[job].items():
tempfiles = set(f for f in job_.expanded_output if is_temp(f))
yield from filterfalse(partial(needed, job_), tempfiles & files)
# temp output
if not job.dynamic_output and (
job not in self.targetjobs or job.rule.name == self.workflow.first_rule
):
tempfiles = (
f
for f in job.expanded_output
if is_temp(f) and f not in self.targetfiles
)
yield from filterfalse(partial(needed, job), tempfiles)
for f in unneeded_files():
logger.info("Removing temporary output file {}.".format(f))
f.remove(remove_non_empty_dir=True)
def handle_log(self, job, upload_remote=True):
for f in job.log:
if not f.exists_local:
# If log file was not created during job, create an empty one.
f.touch_or_create()
if upload_remote and f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
def handle_remote(self, job, upload=True):
""" Remove local files if they are no longer needed and upload. """
if upload:
# handle output files
files = list(job.expanded_output)
if job.benchmark:
files.append(job.benchmark)
for f in files:
if f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
remote_mtime = f.mtime
# immediately force local mtime to match remote,
# since conversions from S3 headers are not 100% reliable
# without this, newness comparisons may fail down the line
f.touch(times=(remote_mtime, remote_mtime))
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
if not self.keep_remote_local:
# handle input files
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
putative = (
lambda f: f.is_remote
and not f.protected
and not f.should_keep_local
)
generated_input = set()
for job_, files in self.dependencies[job].items():
generated_input |= files
for f in filter(putative, files):
if not needed(job_, f):
yield f
for f, f_ in zip(job.output, job.rule.output):
if putative(f) and not needed(job, f) and not f in self.targetfiles:
if f in job.dynamic_output:
for f_ in job.expand_dynamic(f_):
yield f_
else:
yield f
for f in filter(putative, job.input):
# TODO what about remote inputs that are used by multiple jobs?
if f not in generated_input:
yield f
for f in unneeded_files():
if f.exists_local:
logger.info("Removing local output file: {}".format(f))
f.remove()
def jobid(self, job):
"""Return job id of given job."""
if job.is_group():
return job.jobid
else:
return self._jobid[job]
def update(
self, jobs, file=None, visited=None, skip_until_dynamic=False, progress=False
):
""" Update the DAG by adding given jobs and their dependencies. """
if visited is None:
visited = set()
producer = None
exceptions = list()
jobs = sorted(jobs, reverse=not self.ignore_ambiguity)
cycles = list()
for job in jobs:
logger.dag_debug(dict(status="candidate", job=job))
if file in job.input:
cycles.append(job)
continue
if job in visited:
cycles.append(job)
continue
try:
self.check_periodic_wildcards(job)
self.update_(
job,
visited=set(visited),
skip_until_dynamic=skip_until_dynamic,
progress=progress,
)
# TODO this might fail if a rule discarded here is needed
# elsewhere
if producer:
if job < producer or self.ignore_ambiguity:
break
elif producer is not None:
raise AmbiguousRuleException(file, job, producer)
producer = job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
exceptions.append(ex)
except RecursionError as e:
raise WorkflowError(
e,
"If building the DAG exceeds the recursion limit, "
"this is likely due to a cyclic dependency."
"E.g. you might have a sequence of rules that "
"can generate their own input. Try to make "
"the output files more specific. "
"A common pattern is to have different prefixes "
"in the output files of different rules."
+ "\nProblematic file pattern: {}".format(file)
if file
else "",
)
if producer is None:
if cycles:
job = cycles[0]
raise CyclicGraphException(job.rule, file, rule=job.rule)
if len(exceptions) > 1:
raise WorkflowError(*exceptions)
elif len(exceptions) == 1:
raise exceptions[0]
else:
logger.dag_debug(dict(status="selected", job=producer))
logger.dag_debug(
dict(
file=file,
msg="Producer found, hence exceptions are ignored.",
exception=WorkflowError(*exceptions),
)
)
n = len(self.dependencies)
if progress and n % 1000 == 0 and n and self._progress != n:
logger.info("Processed {} potential jobs.".format(n))
self._progress = n
return producer
def update_(self, job, visited=None, skip_until_dynamic=False, progress=False):
""" Update the DAG by adding the given job and its dependencies. """
if job in self.dependencies:
return
if visited is None:
visited = set()
visited.add(job)
dependencies = self.dependencies[job]
potential_dependencies = self.collect_potential_dependencies(job)
skip_until_dynamic = skip_until_dynamic and not job.dynamic_output
missing_input = set()
producer = dict()
exceptions = dict()
for file, jobs in potential_dependencies.items():
# If possible, obtain inventory information starting from
# given file and store it in the IOCache.
# This should provide faster access to existence and mtime information
# than querying file by file. If the file type does not support inventory
# information, this call is a no-op.
file.inventory()
if not jobs:
# no producing job found
if not file.exists:
# file not found, hence missing input
missing_input.add(file)
# file found, no problem
continue
try:
selected_job = self.update(
jobs,
file=file,
visited=visited,
skip_until_dynamic=skip_until_dynamic or file in job.dynamic_input,
progress=progress,
)
producer[file] = selected_job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
if not file.exists:
self.delete_job(job, recursive=False) # delete job from tree
raise ex
else:
logger.dag_debug(
dict(
file=file,
msg="No producers found, but file is present on disk.",
exception=ex,
)
)
for file, job_ in producer.items():
dependencies[job_].add(file)
self.depending[job_][job].add(file)
if self.is_batch_rule(job.rule) and self.batch.is_final:
# For the final batch, ensure that all input files from
# previous batches are present on disk.
if any(
f for f in job.input if f not in potential_dependencies and not f.exists
):
raise WorkflowError(
"Unable to execute batch {} because not all previous batches "
"have been completed before or files have been deleted.".format(
self.batch
)
)
if missing_input:
self.delete_job(job, recursive=False) # delete job from tree
raise MissingInputException(job.rule, missing_input)
if skip_until_dynamic:
self._dynamic.add(job)
def update_needrun(self):
""" Update the information whether a job needs to be executed. """
output_mintime = dict()
def update_output_mintime(job):
try:
return output_mintime[job]
except KeyError:
for job_ in chain([job], self.depending[job]):
try:
t = output_mintime[job_]
except KeyError:
t = job_.output_mintime
if t is not None:
output_mintime[job] = t
return
output_mintime[job] = None
def update_needrun(job):
reason = self.reason(job)
noinitreason = not reason
updated_subworkflow_input = self.updated_subworkflow_files.intersection(
job.input
)
if (
job not in self.omitforce
and job.rule in self.forcerules
or not self.forcefiles.isdisjoint(job.output)
):
reason.forced = True
elif updated_subworkflow_input:
reason.updated_input.update(updated_subworkflow_input)
elif job in self.targetjobs:
# TODO find a way to handle added/removed input files here?
if not job.output and not job.benchmark:
if job.input:
if job.rule.norun:
reason.updated_input_run.update(
[f for f in job.input if not f.exists]
)
else:
reason.nooutput = True
else:
reason.noio = True
else:
if job.rule in self.targetrules:
missing_output = job.missing_output()
else:
missing_output = job.missing_output(
requested=set(chain(*self.depending[job].values()))
| self.targetfiles
)
reason.missing_output.update(missing_output)
if not reason:
output_mintime_ = output_mintime.get(job)
if output_mintime_:
updated_input = [
f for f in job.input if f.exists and f.is_newer(output_mintime_)
]
reason.updated_input.update(updated_input)
if noinitreason and reason:
reason.derived = False
reason = self.reason
_needrun = self._needrun
dependencies = self.dependencies
depending = self.depending
_needrun.clear()
candidates = list(self.jobs)
# Update the output mintime of all jobs.
# We traverse them in BFS (level order) starting from target jobs.
# Then, we check output mintime of job itself and all direct descendants,
# which have already been visited in the level before.
# This way, we achieve a linear runtime.
for job in candidates:
update_output_mintime(job)
# update prior reason for all candidate jobs
for job in candidates:
update_needrun(job)
queue = list(filter(reason, candidates))
visited = set(queue)
while queue:
job = queue.pop(0)
_needrun.add(job)
for job_, files in dependencies[job].items():
missing_output = job_.missing_output(requested=files)
reason(job_).missing_output.update(missing_output)
if missing_output and not job_ in visited:
visited.add(job_)
queue.append(job_)
for job_, files in depending[job].items():
if job_ in candidates:
reason(job_).updated_input_run.update(files)
if not job_ in visited:
visited.add(job_)
queue.append(job_)
# update len including finished jobs (because they have already increased the job counter)
self._len = len(self._finished | self._needrun)
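# Note on the line above: the union keeps already-finished jobs in the count
# even if a later re-evaluation no longer classifies them as needrun, so the
# job counter they already incremented (e.g. "3 of 5 steps") stays consistent.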
def in_until(self, job):
"""Return whether given job has been specified via --until."""
return job.rule.name in self.untilrules or not self.untilfiles.isdisjoint(
job.output
)
def in_omitfrom(self, job):
"""Return whether given job has been specified via --omit-from."""
return job.rule.name in self.omitrules or not self.omitfiles.isdisjoint(
job.output
)
def until_jobs(self):
"""Returns a generator of jobs specified by untiljobs."""
return (job for job in self.jobs if self.in_until(job))
def omitfrom_jobs(self):
"""Returns a generator of jobs specified by omitfromjobs."""
return (job for job in self.jobs if self.in_omitfrom(job))
def downstream_of_omitfrom(self):
"""Returns the downstream of --omit-from rules or files and themselves."""
return self.bfs(self.depending, *self.omitfrom_jobs())
def delete_omitfrom_jobs(self):
"""Removes jobs downstream of jobs specified by --omit-from."""
if not self.omitrules and not self.omitfiles:
return
downstream_jobs = list(
self.downstream_of_omitfrom()
) # need to cast as list before deleting jobs
for job in downstream_jobs:
self.delete_job(job, recursive=False, add_dependencies=True)
def set_until_jobs(self):
"""Removes jobs downstream of jobs specified by --omit-from."""
if not self.untilrules and not self.untilfiles:
return
self.targetjobs = set(self.until_jobs())
def update_priority(self):
""" Update job priorities. """
prioritized = (
lambda job: job.rule in self.priorityrules
or not self.priorityfiles.isdisjoint(job.output)
)
for job in self.needrun_jobs:
self._priority[job] = job.rule.priority
for job in self.bfs(
self.dependencies,
*filter(prioritized, self.needrun_jobs),
stop=self.noneedrun_finished,
):
self._priority[job] = Job.HIGHEST_PRIORITY
def update_groups(self):
groups = dict()
for job in self.needrun_jobs:
if job.group is None:
continue
stop = lambda j: j.group != job.group
# BFS into depending needrun jobs if in same group
# Note: never go up here (into depending), because it may contain
# jobs that have been sorted out due to e.g. ruleorder.
group = self.group_job_factory.new(
job.group,
(
job
for job in self.bfs(self.dependencies, job, stop=stop)
if self.needrun(job)
),
)
# merge with previously determined groups if present
for j in group:
if j in groups:
other = groups[j]
other.merge(group)
group = other
# update assignment
for j in group:
if j not in groups:
groups[j] = group
self._group = groups
self._update_group_components()
def _update_group_components(self):
# span connected components if requested
for groupid, conn_components in groupby(
set(self._group.values()), key=lambda group: group.groupid
):
n_components = self.workflow.group_components.get(groupid, 1)
if n_components > 1:
for chunk in group_into_chunks(n_components, conn_components):
if len(chunk) > 1:
primary = chunk[0]
for secondary in chunk[1:]:
primary.merge(secondary)
for j in primary:
self._group[j] = primary
def update_ready(self, jobs=None):
"""Update information whether a job is ready to execute.
Given jobs must be needrun jobs!
"""
if jobs is None:
jobs = self.needrun_jobs
candidate_groups = set()
for job in jobs:
if not self.finished(job) and self._ready(job):
if job.group is None:
self._ready_jobs.add(job)
else:
group = self._group[job]
group.finalize()
candidate_groups.add(group)
self._ready_jobs.update(
group
for group in candidate_groups
if all(self._ready(job) for job in group)
)
def get_jobs_or_groups(self):
visited_groups = set()
for job in self.jobs:
if job.group is None:
yield job
else:
group = self._group[job]
if group in visited_groups:
continue
visited_groups.add(group)
yield group
def close_remote_objects(self):
"""Close all remote objects."""
for job in self.jobs:
if not self.needrun(job):
job.close_remote()
def postprocess(self):
"""Postprocess the DAG. This has to be invoked after any change to the
DAG topology."""
self.update_jobids()
self.update_needrun()
self.update_priority()
self.handle_pipes()
self.update_groups()
self.update_ready()
self.close_remote_objects()
self.update_checkpoint_outputs()
def handle_pipes(self):
"""Use pipes to determine job groups. Check if every pipe has exactly
one consumer"""
for job in self.needrun_jobs:
candidate_groups = set()
if job.group is not None:
candidate_groups.add(job.group)
all_depending = set()
has_pipe = False
for f in job.output:
if is_flagged(f, "pipe"):
if job.is_run:
raise WorkflowError(
"Rule defines pipe output but "
"uses a 'run' directive. This is "
"not possible for technical "
"reasons. Consider using 'shell' or "
"'script'.",
rule=job.rule,
)
has_pipe = True
depending = [
j for j, files in self.depending[job].items() if f in files
]
if len(depending) > 1:
raise WorkflowError(
"Output file {} is marked as pipe "
"but more than one job depends on "
"it. Make sure that any pipe "
"output is only consumed by one "
"job".format(f),
rule=job.rule,
)
elif len(depending) == 0:
raise WorkflowError(
"Output file {} is marked as pipe "
"but it has no consumer. This is "
"invalid because it can lead to "
"a dead lock.".format(f),
rule=job.rule,
)
depending = depending[0]
if depending.is_run:
raise WorkflowError(
"Rule consumes pipe input but "
"uses a 'run' directive. This is "
"not possible for technical "
"reasons. Consider using 'shell' or "
"'script'.",
rule=job.rule,
)
all_depending.add(depending)
if depending.group is not None:
candidate_groups.add(depending.group)
if not has_pipe:
continue
if len(candidate_groups) > 1:
raise WorkflowError(
"An output file is marked as "
"pipe, but consuming jobs "
"are part of conflicting "
"groups.",
rule=job.rule,
)
elif candidate_groups:
# extend the candidate group to all involved jobs
group = candidate_groups.pop()
else:
# generate a random unique group name
group = str(uuid.uuid4())
job.group = group
for j in all_depending:
j.group = group
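# Hedged Snakefile-level illustration of the grouping above (rule names and
# shell commands are placeholders): the producer and the single consumer of a
# pipe() output are forced into one group so both jobs run at the same time
# and can stream through the pipe.
#
#     rule produce:
#         output: pipe("data.fifo")
#         shell: "generate > {output}"
#     rule consume:
#         input: "data.fifo"
#         output: "result.txt"
#         shell: "process {input} > {output}"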
def _ready(self, job):
"""Return whether the given job is ready to execute."""
group = self._group.get(job, None)
if group is None:
is_external_needrun_dep = self.needrun
else:
def is_external_needrun_dep(j):
g = self._group.get(j, None)
return self.needrun(j) and (g is None or g != group)
return self._finished.issuperset(
filter(is_external_needrun_dep, self.dependencies[job])
)
def update_checkpoint_dependencies(self, jobs=None):
"""Update dependencies of checkpoints."""
updated = False
self.update_checkpoint_outputs()
if jobs is None:
jobs = [job for job in self.jobs if not self.needrun(job)]
for job in jobs:
if job.is_checkpoint:
depending = list(self.depending[job])
# re-evaluate depending jobs, replace and update DAG
for j in depending:
logger.info("Updating job {} ({}).".format(self.jobid(j), j))
newjob = j.updated()
self.replace_job(j, newjob, recursive=False)
updated = True
if updated:
# This has to be done for each checkpoint,
# otherwise, jobs may be missing in the end.
self.postprocess()
return updated
def finish(self, job, update_dynamic=True):
"""Finish a given job (e.g. remove from ready jobs, mark depending jobs
as ready)."""
try:
self._ready_jobs.remove(job)
except KeyError:
pass
if job.is_group():
jobs = job
else:
jobs = [job]
self._finished.update(jobs)
updated_dag = False
if update_dynamic:
updated_dag = self.update_checkpoint_dependencies(jobs)
# mark depending jobs as ready
# skip jobs that are marked as until jobs
self.update_ready(
j
for job in jobs
for j in self.depending[job]
if not self.in_until(job) and self.needrun(j)
)
for job in jobs:
if update_dynamic and job.dynamic_output:
logger.info("Dynamically updating jobs")
newjob = self.update_dynamic(job)
if newjob:
# simulate that this job ran and was finished before
self.omitforce.add(newjob)
self._needrun.add(newjob)
self._finished.add(newjob)
updated_dag = True
self.postprocess()
self.handle_protected(newjob)
self.handle_touch(newjob)
if updated_dag:
# We might have new jobs, so we need to ensure that all conda envs
# and singularity images are set up.
if self.workflow.use_singularity:
self.pull_container_imgs()
if self.workflow.use_conda:
self.create_conda_envs()
def new_job(self, rule, targetfile=None, format_wildcards=None):
"""Create new job for given rule and (optional) targetfile.
This will reuse existing jobs with the same wildcards."""
key = (rule, targetfile)
if key in self.job_cache:
assert targetfile is not None
return self.job_cache[key]
wildcards_dict = rule.get_wildcards(targetfile)
job = self.job_factory.new(
rule,
self,
wildcards_dict=wildcards_dict,
format_wildcards=format_wildcards,
targetfile=targetfile,
)
self.cache_job(job)
return job
def cache_job(self, job):
for f in job.products:
self.job_cache[(job.rule, f)] = job
def update_dynamic(self, job):
"""Update the DAG by evaluating the output of the given job that
contains dynamic output files."""
dynamic_wildcards = job.dynamic_wildcards
if not dynamic_wildcards:
# this happens e.g. in dryrun if output is not yet present
return
depending = list(
filter(lambda job_: not self.finished(job_), self.bfs(self.depending, job))
)
newrule, non_dynamic_wildcards = job.rule.dynamic_branch(
dynamic_wildcards, input=False
)
self.specialize_rule(job.rule, newrule)
# no targetfile needed for job
newjob = self.new_job(newrule, format_wildcards=non_dynamic_wildcards)
self.replace_job(job, newjob)
for job_ in depending:
needs_update = any(
f.get_wildcard_names() & dynamic_wildcards.keys()
for f in job_.rule.dynamic_input
)
if needs_update:
newrule_ = job_.rule.dynamic_branch(dynamic_wildcards)
if newrule_ is not None:
self.specialize_rule(job_.rule, newrule_)
if not self.dynamic(job_):
logger.debug("Updating job {}.".format(job_))
newjob_ = self.new_job(
newrule_, targetfile=job_.output[0] if job_.output else None
)
unexpected_output = self.reason(
job_
).missing_output.intersection(newjob.existing_output)
if unexpected_output:
logger.warning(
"Warning: the following output files of rule {} were not "
"present when the DAG was created:\n{}".format(
newjob_.rule, unexpected_output
)
)
self.replace_job(job_, newjob_)
return newjob
def delete_job(self, job, recursive=True, add_dependencies=False):
"""Delete given job from DAG."""
if job in self.targetjobs:
self.targetjobs.remove(job)
if add_dependencies:
for _job in self.dependencies[job]:
self.targetjobs.add(_job)
for job_ in self.depending[job]:
del self.dependencies[job_][job]
del self.depending[job]
for job_ in self.dependencies[job]:
depending = self.depending[job_]
del depending[job]
if not depending and recursive:
self.delete_job(job_)
del self.dependencies[job]
if job in self._needrun:
self._len -= 1
self._needrun.remove(job)
del self._reason[job]
if job in self._finished:
self._finished.remove(job)
if job in self._dynamic:
self._dynamic.remove(job)
if job in self._ready_jobs:
self._ready_jobs.remove(job)
# remove from cache
for f in job.output:
try:
del self.job_cache[(job.rule, f)]
except KeyError:
pass
def replace_job(self, job, newjob, recursive=True):
"""Replace given job with new job."""
add_to_targetjobs = job in self.targetjobs
depending = list(self.depending[job].items())
if self.finished(job):
self._finished.add(newjob)
self.delete_job(job, recursive=recursive)
if add_to_targetjobs:
self.targetjobs.add(newjob)
self.cache_job(newjob)
self.update([newjob])
logger.debug("Replace {} with dynamic branch {}".format(job, newjob))
for job_, files in depending:
# if not job_.dynamic_input:
logger.debug("updating depending job {}".format(job_))
self.dependencies[job_][newjob].update(files)
self.depending[newjob][job_].update(files)
def specialize_rule(self, rule, newrule):
"""Specialize the given rule by inserting newrule into the DAG."""
assert newrule is not None
self.rules.add(newrule)
self.update_output_index()
def is_batch_rule(self, rule):
"""Return True if the underlying rule is to be used for batching the DAG."""
return self.batch is not None and rule.name == self.batch.rulename
def collect_potential_dependencies(self, job):
"""Collect all potential dependencies of a job. These might contain
ambiguities. The keys of the returned dict represent the files to be considered."""
dependencies = defaultdict(list)
# use a set to circumvent multiple jobs for the same file
# if user specified it twice
file2jobs = self.file2jobs
input_files = list(job.unique_input)
if self.is_batch_rule(job.rule):
# only consider the defined partition of the input files
input_batch = self.batch.get_batch(input_files)
if len(input_batch) != len(input_files):
logger.info(
"Considering only batch {} for DAG computation.\n"
"All jobs beyond the batching rule are omitted until the final batch.\n"
"Don't forget to run the other batches too.".format(self.batch)
)
input_files = input_batch
for file in input_files:
# omit the file if it comes from a subworkflow
if file in job.subworkflow_input:
continue
try:
if file in job.dependencies:
jobs = [self.new_job(job.dependencies[file], targetfile=file)]
else:
jobs = file2jobs(file)
dependencies[file].extend(jobs)
except MissingRuleException as ex:
# no dependency found
dependencies[file] = []
return dependencies
def bfs(self, direction, *jobs, stop=lambda job: False):
"""Perform a breadth-first traversal of the DAG."""
queue = list(jobs)
visited = set(queue)
while queue:
job = queue.pop(0)
if stop(job):
# stop criterion reached for this node
continue
yield job
for job_, _ in direction[job].items():
if not job_ in visited:
queue.append(job_)
visited.add(job_)
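# Minimal usage sketch (mirrors the `jobs` property above): traversing the
# dependency edges from all target jobs enumerates every job in the DAG, e.g.
#
#     all_jobs = list(self.bfs(self.dependencies, *self.targetjobs))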
def level_bfs(self, direction, *jobs, stop=lambda job: False):
"""Perform a breadth-first traversal of the DAG, but also yield the
level together with each job."""
queue = [(job, 0) for job in jobs]
visited = set(jobs)
while queue:
job, level = queue.pop(0)
if stop(job):
# stop criterion reached for this node
continue
yield level, job
level += 1
for job_, _ in direction[job].items():
if not job_ in visited:
queue.append((job_, level))
visited.add(job_)
def dfs(self, direction, *jobs, stop=lambda job: False, post=True):
"""Perform depth-first traversal of the DAG."""
visited = set()
def _dfs(job):
"""Inner function for DFS traversal."""
if stop(job):
return
if not post:
yield job
for job_ in direction[job]:
if not job_ in visited:
visited.add(job_)
for j in _dfs(job_):
yield j
if post:
yield job
for job in jobs:
for job_ in _dfs(job):
yield job_
def new_wildcards(self, job):
"""Return wildcards that are newly introduced in this job,
compared to its ancestors."""
new_wildcards = set(job.wildcards.items())
for job_ in self.dependencies[job]:
if not new_wildcards:
return set()
for wildcard in job_.wildcards.items():
new_wildcards.discard(wildcard)
return new_wildcards
def rule2job(self, targetrule):
"""Generate a new job from a given rule."""
if targetrule.has_wildcards():
raise WorkflowError(
"Target rules may not contain wildcards. Please specify concrete files or a rule without wildcards."
)
return self.new_job(targetrule)
def file2jobs(self, targetfile):
rules = self.output_index.match(targetfile)
jobs = []
exceptions = list()
for rule in rules:
if rule.is_producer(targetfile):
try:
jobs.append(self.new_job(rule, targetfile=targetfile))
except InputFunctionException as e:
exceptions.append(e)
if not jobs:
if exceptions:
raise exceptions[0]
raise MissingRuleException(targetfile)
return jobs
def rule_dot2(self):
dag = defaultdict(list)
visited = set()
preselect = set()
def preselect_parents(job):
for parent in self.depending[job]:
if parent in preselect:
continue
preselect.add(parent)
preselect_parents(parent)
def build_ruledag(job, key=lambda job: job.rule.name):
if job in visited:
return
visited.add(job)
deps = sorted(self.dependencies[job], key=key)
deps = [
(
group[0]
if preselect.isdisjoint(group)
else preselect.intersection(group).pop()
)
for group in (list(g) for _, g in groupby(deps, key))
]
dag[job].extend(deps)
preselect_parents(job)
for dep in deps:
build_ruledag(dep)
for job in self.targetjobs:
build_ruledag(job)
return self._dot(dag.keys(), print_wildcards=False, print_types=False, dag=dag)
def rule_dot(self):
graph = defaultdict(set)
for job in self.jobs:
graph[job.rule].update(dep.rule for dep in self.dependencies[job])
return self._dot(graph)
def dot(self):
def node2style(job):
if not self.needrun(job):
return "rounded,dashed"
if self.dynamic(job) or job.dynamic_input:
return "rounded,dotted"
return "rounded"
def format_wildcard(wildcard):
name, value = wildcard
if DYNAMIC_FILL in value:
value = "..."
return "{}: {}".format(name, value)
node2rule = lambda job: job.rule
node2label = lambda job: "\\n".join(
chain(
[job.rule.name], sorted(map(format_wildcard, self.new_wildcards(job)))
)
)
dag = {job: self.dependencies[job] for job in self.jobs}
return self._dot(
dag, node2rule=node2rule, node2style=node2style, node2label=node2label
)
def _dot(
self,
graph,
node2rule=lambda node: node,
node2style=lambda node: "rounded",
node2label=lambda node: node,
):
# color rules
huefactor = 2 / (3 * len(self.rules))
rulecolor = {
rule: "{:.2f} 0.6 0.85".format(i * huefactor)
for i, rule in enumerate(self.rules)
}
# markup
node_markup = '\t{}[label = "{}", color = "{}", style="{}"];'.format
edge_markup = "\t{} -> {}".format
# node ids
ids = {node: i for i, node in enumerate(graph)}
# calculate nodes
nodes = [
node_markup(
ids[node],
node2label(node),
rulecolor[node2rule(node)],
node2style(node),
)
for node in graph
]
# calculate edges
edges = [
edge_markup(ids[dep], ids[node])
for node, deps in graph.items()
for dep in deps
]
return textwrap.dedent(
"""\
digraph snakemake_dag {{
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, \
fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
{items}
}}\
"""
).format(items="\n".join(nodes + edges))
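# For orientation, the string returned above looks roughly like this (ids,
# labels and colors depend on the concrete DAG; this sample is illustrative):
#
#     digraph snakemake_dag {
#         graph[bgcolor=white, margin=0];
#         node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2];
#         edge[penwidth=2, color=grey];
#         0[label = "all", color = "0.00 0.6 0.85", style="rounded"];
#         1[label = "map\nsample: A", color = "0.33 0.6 0.85", style="rounded"];
#         1 -> 0
#     }
#
# Rendering is typically done by piping `snakemake --dag` into Graphviz, e.g.
# `snakemake --dag | dot -Tsvg > dag.svg`.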
def filegraph_dot(
self,
node2rule=lambda node: node,
node2style=lambda node: "rounded",
node2label=lambda node: node,
):
# NOTE: This is code from the rule_dot method.
# This method could be split like there as well, however,
# it cannot easily reuse the _dot method due to the different node type
graph = defaultdict(set)
for job in self.jobs:
graph[job.rule].update(dep.rule for dep in self.dependencies[job])
# node ids
ids = {node: i for i, node in enumerate(graph)}
# Compute colors for rules
def hsv_to_htmlhexrgb(h, s, v):
"""Convert hsv colors to hex-encoded rgb colors usable by html."""
import colorsys
hex_r, hex_g, hex_b = (round(255 * x) for x in colorsys.hsv_to_rgb(h, s, v))
return "#{hex_r:0>2X}{hex_g:0>2X}{hex_b:0>2X}".format(
hex_r=hex_r, hex_g=hex_g, hex_b=hex_b
)
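# Doctest-style check of the helper above (pure red hue, s=0.6, v=0.85 as used
# for the rule colors below):
#
#     >>> hsv_to_htmlhexrgb(0, 0.6, 0.85)
#     '#D95757'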
huefactor = 2 / (3 * len(self.rules))
rulecolor = {
rule: hsv_to_htmlhexrgb(i * huefactor, 0.6, 0.85)
for i, rule in enumerate(self.rules)
}
def resolve_input_functions(input_files):
"""Iterate over all input files and replace input functions
with a fixed string.
"""
files = []
for f in input_files:
if callable(f):
files.append("<input function>")
# NOTE: This is a workaround. It would be more informative
# to show the code of the input function here (if it is
# short enough). This cannot be easily done with the inspect
# module, since the line numbers in the Snakefile do not
# behave as expected. One (complicated) solution for this
# would be to find the Snakefile and directly extract the
# code of the function.
else:
files.append(repr(f).strip("'"))
return files
def html_node(node_id, node, color):
"""Assemble a html style node for graphviz"""
input_files = resolve_input_functions(node._input)
output_files = [repr(f).strip("'") for f in node._output]
input_header = (
'<b><font point-size="14">↪ input</font></b>'
if input_files
else ""
)
output_header = (
'<b><font point-size="14">output →</font></b>'
if output_files
else ""
)
html_node = [
'{node_id} [ shape=none, margin=0, label=<<table border="2" color="{color}" cellspacing="3" cellborder="0">'.format(
node_id=node_id, color=color
),
"<tr><td>",
'<b><font point-size="18">{node.name}</font></b>'.format(node=node),
"</td></tr>",
"<hr/>",
'<tr><td align="left"> {input_header} </td></tr>'.format(
input_header=input_header
),
]
for filename in sorted(input_files):
# Escape html relevant chars like '<' and '>' in filenames
# These can be added by input functions etc. and cannot be
# displayed in graphviz HTML nodes.
in_file = html.escape(filename)
html_node.extend(
[
"<tr>",
'<td align="left"><font face="monospace">{in_file}</font></td>'.format(
in_file=in_file
),
"</tr>",
]
)
html_node.append("<hr/>")
html_node.append(
'<tr><td align="right"> {output_header} </td> </tr>'.format(
output_header=output_header
)
)
for filename in sorted(output_files):
out_file = html.escape(filename)
html_node.extend(
[
"<tr>",
'<td align="left"><font face="monospace">{out_file}</font></td>'
"</tr>".format(out_file=out_file),
]
)
html_node.append("</table>>]")
return "\n".join(html_node)
nodes = [
html_node(ids[node], node, rulecolor[node2rule(node)]) for node in graph
]
# calculate edges
edge_markup = "\t{} -> {}".format
edges = [
edge_markup(ids[dep], ids[node])
for node, deps in graph.items()
for dep in deps
]
return textwrap.dedent(
"""\
digraph snakemake_dag {{
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, \
fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
{items}
}}\
"""
).format(items="\n".join(nodes + edges))
def summary(self, detailed=False):
if detailed:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tinput-file(s)\tshellcmd\tstatus\tplan"
else:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tstatus\tplan"
for job in self.jobs:
output = job.rule.output if self.dynamic(job) else job.expanded_output
for f in output:
rule = self.workflow.persistence.rule(f)
rule = "-" if rule is None else rule
version = self.workflow.persistence.version(f)
version = "-" if version is None else str(version)
date = time.ctime(f.mtime) if f.exists else "-"
pending = "update pending" if self.reason(job) else "no update"
log = self.workflow.persistence.log(f)
log = "-" if log is None else ",".join(log)
input = self.workflow.persistence.input(f)
input = "-" if input is None else ",".join(input)
shellcmd = self.workflow.persistence.shellcmd(f)
shellcmd = "-" if shellcmd is None else shellcmd
# remove new line characters, leading and trailing whitespace
shellcmd = shellcmd.strip().replace("\n", "; ")
status = "ok"
if not f.exists:
status = "missing"
elif self.reason(job).updated_input:
status = "updated input files"
elif self.workflow.persistence.version_changed(job, file=f):
status = "version changed to {}".format(job.rule.version)
elif self.workflow.persistence.code_changed(job, file=f):
status = "rule implementation changed"
elif self.workflow.persistence.input_changed(job, file=f):
status = "set of input files changed"
elif self.workflow.persistence.params_changed(job, file=f):
status = "params changed"
if detailed:
yield "\t".join(
(f, date, rule, version, log, input, shellcmd, status, pending)
)
else:
yield "\t".join((f, date, rule, version, log, status, pending))
# MASKED: archive function (lines 1856-1926)
def clean(self, only_temp=False, dryrun=False):
"""Removes files generated by the workflow."""
for job in self.jobs:
for f in job.output:
if not only_temp or is_flagged(f, "temp"):
# The reason for the second check is that dangling
# symlinks fail f.exists.
if f.exists or os.path.islink(f):
if f.protected:
logger.error("Skipping write-protected file {}.".format(f))
else:
msg = "Deleting {}" if not dryrun else "Would delete {}"
logger.info(msg.format(f))
if not dryrun:
# Remove non-empty dirs if flagged as temp()
f.remove(remove_non_empty_dir=only_temp)
def list_untracked(self):
"""List files in the workdir that are not in the dag."""
used_files = set()
files_in_cwd = set()
for job in self.jobs:
used_files.update(
os.path.relpath(file)
for file in chain(job.local_input, job.local_output, job.log)
)
for root, dirs, files in os.walk(os.getcwd()):
# Ignore hidden files and don't traverse into hidden dirs
files_in_cwd.update(
[
os.path.relpath(os.path.join(root, f))
for f in files
if not f[0] == "."
]
)
dirs[:] = [d for d in dirs if not d[0] == "."]
for f in sorted(list(files_in_cwd - used_files)):
logger.info(f)
def d3dag(self, max_jobs=10000):
def node(job):
jobid = self.jobid(job)
return {
"id": jobid,
"value": {
"jobid": jobid,
"label": job.rule.name,
"rule": job.rule.name,
},
}
def edge(a, b):
return {"u": self.jobid(a), "v": self.jobid(b)}
jobs = list(self.jobs)
if len(jobs) > max_jobs:
logger.info(
"Job-DAG is too large for visualization (>{} jobs).".format(max_jobs)
)
else:
logger.d3dag(
nodes=[node(job) for job in jobs],
edges=[
edge(dep, job)
for job in jobs
for dep in self.dependencies[job]
if self.needrun(dep)
],
)
def stats(self):
rules = Counter()
rules.update(job.rule for job in self.needrun_jobs)
rules.update(job.rule for job in self.finished_jobs)
yield "Job counts:"
yield "\tcount\tjobs"
for rule, count in sorted(rules.most_common(), key=lambda item: item[0].name):
yield "\t{}\t{}".format(count, rule)
yield "\t{}".format(len(self))
def __str__(self):
return self.dot()
def __len__(self):
return self._len
|
def archive(self, path):
"""Archives workflow such that it can be re-run on a different system.
Archiving includes git versioned files (i.e. Snakefiles, config files, ...),
ancestral input files and conda environments.
"""
if path.endswith(".tar"):
mode = "x"
elif path.endswith("tar.bz2"):
mode = "x:bz2"
elif path.endswith("tar.xz"):
mode = "x:xz"
elif path.endswith("tar.gz"):
mode = "x:gz"
else:
raise WorkflowError(
"Unsupported archive format "
"(supported: .tar, .tar.gz, .tar.bz2, .tar.xz)"
)
if os.path.exists(path):
raise WorkflowError("Archive already exists:\n" + path)
self.create_conda_envs(forceall=True)
try:
workdir = Path(os.path.abspath(os.getcwd()))
with tarfile.open(path, mode=mode, dereference=True) as archive:
archived = set()
def add(path):
if workdir not in Path(os.path.abspath(path)).parents:
logger.warning(
"Path {} cannot be archived: "
"not within working directory.".format(path)
)
else:
f = os.path.relpath(path)
if f not in archived:
archive.add(f)
archived.add(f)
logger.info("archived " + f)
logger.info(
"Archiving snakefiles, scripts and files under "
"version control..."
)
for f in self.workflow.get_sources():
add(f)
logger.info("Archiving external input files...")
for job in self.jobs:
# input files
for f in job.input:
if not any(
f in files for files in self.dependencies[job].values()
):
# this is an input file that is not created by any job
add(f)
logger.info("Archiving conda environments...")
envs = set()
for job in self.jobs:
if job.conda_env_file:
env_archive = job.archive_conda_env()
envs.add(env_archive)
for env in envs:
add(env)
except (Exception, BaseException) as e:
os.remove(path)
raise e
| 1,856 | 1,926 |
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2015-2019, Johannes Köster"
__email__ = "[email protected]"
__license__ = "MIT"
import html
import os
import shutil
import textwrap
import time
import tarfile
from collections import defaultdict, Counter
from itertools import chain, filterfalse, groupby
from functools import partial
from pathlib import Path
import uuid
import math
from snakemake.io import PeriodicityDetector, wait_for_files, is_flagged
from snakemake.jobs import Reason, JobFactory, GroupJobFactory, Job
from snakemake.exceptions import MissingInputException
from snakemake.exceptions import MissingRuleException, AmbiguousRuleException
from snakemake.exceptions import CyclicGraphException, MissingOutputException
from snakemake.exceptions import IncompleteFilesException, ImproperOutputException
from snakemake.exceptions import PeriodicWildcardError
from snakemake.exceptions import RemoteFileException, WorkflowError, ChildIOException
from snakemake.exceptions import InputFunctionException
from snakemake.logging import logger
from snakemake.common import DYNAMIC_FILL, group_into_chunks
from snakemake.deployment import conda, singularity
from snakemake.output_index import OutputIndex
from snakemake import workflow
class Batch:
"""Definition of a batch for calculating only a partial DAG."""
def __init__(self, rulename: str, idx: int, batches: int):
assert idx <= batches
assert idx > 0
self.rulename = rulename
self.idx = idx
self.batches = batches
def get_batch(self, items: list):
"""Return the defined batch of the given items.
Items are usually input files."""
# make sure that we always consider items in the same order
if len(items) < self.batches:
raise WorkflowError(
"Batching rule {} has less input files than batches. "
"Please choose a smaller number of batches.".format(self.rulename)
)
items = sorted(items)
batch_len = math.floor(len(items) / self.batches)
# self.batch is one-based, hence we have to subtract 1
idx = self.idx - 1
i = idx * batch_len
if self.is_final:
# extend the last batch to cover rest of list
return items[i:]
else:
return items[i : i + batch_len]
@property
def is_final(self):
return self.idx == self.batches
def __str__(self):
return "{}/{} (rule {})".format(self.idx, self.batches, self.rulename)
class DAG:
"""Directed acyclic graph of jobs."""
def __init__(
self,
workflow,
rules=None,
dryrun=False,
targetfiles=None,
targetrules=None,
forceall=False,
forcerules=None,
forcefiles=None,
priorityfiles=None,
priorityrules=None,
untilfiles=None,
untilrules=None,
omitfiles=None,
omitrules=None,
ignore_ambiguity=False,
force_incomplete=False,
ignore_incomplete=False,
notemp=False,
keep_remote_local=False,
batch=None,
):
self.dryrun = dryrun
self.dependencies = defaultdict(partial(defaultdict, set))
self.depending = defaultdict(partial(defaultdict, set))
self._needrun = set()
self._priority = dict()
self._reason = defaultdict(Reason)
self._finished = set()
self._dynamic = set()
self._len = 0
self.workflow = workflow
self.rules = set(rules)
self.ignore_ambiguity = ignore_ambiguity
self.targetfiles = targetfiles
self.targetrules = targetrules
self.priorityfiles = priorityfiles
self.priorityrules = priorityrules
self.targetjobs = set()
self.prioritytargetjobs = set()
self._ready_jobs = set()
self.notemp = notemp
self.keep_remote_local = keep_remote_local
self._jobid = dict()
self.job_cache = dict()
self.conda_envs = dict()
self.container_imgs = dict()
self._progress = 0
self._group = dict()
self.job_factory = JobFactory()
self.group_job_factory = GroupJobFactory()
self.forcerules = set()
self.forcefiles = set()
self.untilrules = set()
self.untilfiles = set()
self.omitrules = set()
self.omitfiles = set()
self.updated_subworkflow_files = set()
if forceall:
self.forcerules.update(self.rules)
elif forcerules:
self.forcerules.update(forcerules)
if forcefiles:
self.forcefiles.update(forcefiles)
if untilrules:
self.untilrules.update(set(rule.name for rule in untilrules))
if untilfiles:
self.untilfiles.update(untilfiles)
if omitrules:
self.omitrules.update(set(rule.name for rule in omitrules))
if omitfiles:
self.omitfiles.update(omitfiles)
self.has_dynamic_rules = any(rule.dynamic_output for rule in self.rules)
self.omitforce = set()
self.batch = batch
if batch is not None and not batch.is_final:
# Since not all input files of a batching rule are considered, we cannot run
# beyond that rule.
# For the final batch, we do not need to omit anything.
self.omitrules.add(batch.rulename)
self.force_incomplete = force_incomplete
self.ignore_incomplete = ignore_incomplete
self.periodic_wildcard_detector = PeriodicityDetector()
self.update_output_index()
def init(self, progress=False):
""" Initialise the DAG. """
for job in map(self.rule2job, self.targetrules):
job = self.update([job], progress=progress)
self.targetjobs.add(job)
for file in self.targetfiles:
job = self.update(self.file2jobs(file), file=file, progress=progress)
self.targetjobs.add(job)
self.cleanup()
self.update_needrun()
self.set_until_jobs()
self.delete_omitfrom_jobs()
self.update_jobids()
self.check_directory_outputs()
# check if remaining jobs are valid
for i, job in enumerate(self.jobs):
job.is_valid()
def check_directory_outputs(self):
"""Check that no output file is contained in a directory output of the same or another rule."""
outputs = sorted(
{
(path(f), job)
for job in self.jobs
for f in job.output
for path in (os.path.abspath, os.path.realpath)
}
)
for i in range(len(outputs) - 1):
(a, job_a), (b, job_b) = outputs[i : i + 2]
try:
common = os.path.commonpath([a, b])
except ValueError:
# commonpath raises error if windows drives are different.
continue
if a != b and common == os.path.commonpath([a]) and job_a != job_b:
raise ChildIOException(parent=outputs[i], child=outputs[i + 1])
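    # Editorial note: because the (path, job) pairs are sorted, a directory output and a
    # file nested inside it end up adjacent, so comparing each pair of neighbours with
    # os.path.commonpath is enough to detect parent/child conflicts between jobs.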
@property
def checkpoint_jobs(self):
for job in self.needrun_jobs:
if job.is_checkpoint:
yield job
def update_checkpoint_outputs(self):
workflow.checkpoints.future_output = set(
f for job in self.checkpoint_jobs for f in job.output
)
def update_jobids(self):
for job in self.jobs:
if job not in self._jobid:
self._jobid[job] = len(self._jobid)
def cleanup_workdir(self):
for io_dir in set(
os.path.dirname(io_file)
for job in self.jobs
for io_file in chain(job.output, job.input)
if not os.path.exists(io_file)
):
if os.path.exists(io_dir) and not len(os.listdir(io_dir)):
os.removedirs(io_dir)
def cleanup(self):
self.job_cache.clear()
final_jobs = set(self.jobs)
todelete = [job for job in self.dependencies if job not in final_jobs]
for job in todelete:
del self.dependencies[job]
try:
del self.depending[job]
except KeyError:
pass
def create_conda_envs(
self, dryrun=False, forceall=False, init_only=False, quiet=False
):
# First deduplicate based on job.conda_env_file
jobs = self.jobs if forceall else self.needrun_jobs
env_set = {
(job.conda_env_file, job.container_img_url)
for job in jobs
if job.conda_env_file
}
# Then based on md5sum values
self.conda_envs = dict()
for (env_file, simg_url) in env_set:
simg = None
if simg_url and self.workflow.use_singularity:
assert (
simg_url in self.container_imgs
), "bug: must first pull singularity images"
simg = self.container_imgs[simg_url]
env = conda.Env(
env_file,
self,
container_img=simg,
cleanup=self.workflow.conda_cleanup_pkgs,
)
self.conda_envs[(env_file, simg_url)] = env
if not init_only:
for env in self.conda_envs.values():
if not dryrun or not quiet:
env.create(dryrun)
def pull_container_imgs(self, dryrun=False, forceall=False, quiet=False):
# First deduplicate based on job.conda_env_file
jobs = self.jobs if forceall else self.needrun_jobs
img_set = {job.container_img_url for job in jobs if job.container_img_url}
for img_url in img_set:
img = singularity.Image(img_url, self)
if not dryrun or not quiet:
img.pull(dryrun)
self.container_imgs[img_url] = img
def update_output_index(self):
"""Update the OutputIndex."""
self.output_index = OutputIndex(self.rules)
def check_incomplete(self):
"""Check if any output files are incomplete. This is done by looking up
markers in the persistence module."""
if not self.ignore_incomplete:
incomplete = self.incomplete_files
if incomplete:
if self.force_incomplete:
logger.debug("Forcing incomplete files:")
logger.debug("\t" + "\n\t".join(incomplete))
self.forcefiles.update(incomplete)
else:
raise IncompleteFilesException(incomplete)
def incomplete_external_jobid(self, job):
"""Return the external jobid of the job if it is marked as incomplete.
Returns None, if job is not incomplete, or if no external jobid has been
registered or if force_incomplete is True.
"""
if self.force_incomplete:
return None
jobids = self.workflow.persistence.external_jobids(job)
if len(jobids) == 1:
return jobids[0]
elif len(jobids) > 1:
raise WorkflowError(
"Multiple different external jobids registered "
"for output files of incomplete job {} ({}). This job "
"cannot be resumed. Execute Snakemake with --rerun-incomplete "
"to fix this issue.".format(job.jobid, jobids)
)
def check_dynamic(self):
"""Check dynamic output and update downstream rules if necessary."""
if self.has_dynamic_rules:
for job in filter(
lambda job: (job.dynamic_output and not self.needrun(job)), self.jobs
):
self.update_dynamic(job)
self.postprocess()
def is_edit_notebook_job(self, job):
return self.workflow.edit_notebook and job.targetfile in self.targetfiles
@property
def dynamic_output_jobs(self):
"""Iterate over all jobs with dynamic output files."""
return (job for job in self.jobs if job.dynamic_output)
@property
def jobs(self):
""" All jobs in the DAG. """
for job in self.bfs(self.dependencies, *self.targetjobs):
yield job
@property
def needrun_jobs(self):
""" Jobs that need to be executed. """
for job in filter(
self.needrun,
self.bfs(self.dependencies, *self.targetjobs, stop=self.noneedrun_finished),
):
yield job
@property
def local_needrun_jobs(self):
"""Iterate over all jobs that need to be run and are marked as local."""
return filter(lambda job: job.is_local, self.needrun_jobs)
@property
def finished_jobs(self):
""" Iterate over all jobs that have been finished."""
for job in filter(self.finished, self.bfs(self.dependencies, *self.targetjobs)):
yield job
@property
def ready_jobs(self):
"""Jobs that are ready to execute."""
return self._ready_jobs
def needrun(self, job):
"""Return whether a given job needs to be executed."""
return job in self._needrun
def priority(self, job):
"""Return priority of given job."""
return self._priority[job]
def noneedrun_finished(self, job):
"""
Return whether a given job is finished or was not
required to run at all.
"""
return not self.needrun(job) or self.finished(job)
def reason(self, job):
""" Return the reason of the job execution. """
return self._reason[job]
def finished(self, job):
""" Return whether a job is finished. """
return job in self._finished
def dynamic(self, job):
"""
Return whether a job is dynamic (i.e. it is only a placeholder
for those that are created after the job with dynamic output has
        finished).
"""
if job.is_group():
for j in job:
if j in self._dynamic:
return True
else:
return job in self._dynamic
def requested_files(self, job):
"""Return the files a job requests."""
return set(*self.depending[job].values())
@property
def incomplete_files(self):
"""Return list of incomplete files."""
return list(
chain(
*(
job.output
for job in filter(
self.workflow.persistence.incomplete,
filterfalse(self.needrun, self.jobs),
)
)
)
)
@property
def newversion_files(self):
"""Return list of files where the current version is newer than the
recorded version.
"""
return list(
chain(
*(
job.output
for job in filter(self.workflow.persistence.newversion, self.jobs)
)
)
)
def missing_temp(self, job):
"""
Return whether a temp file that is input of the given job is missing.
"""
for job_, files in self.depending[job].items():
if self.needrun(job_) and any(not f.exists for f in files):
return True
return False
def check_and_touch_output(
self,
job,
wait=3,
ignore_missing_output=False,
no_touch=False,
force_stay_on_remote=False,
):
""" Raise exception if output files of job are missing. """
expanded_output = [job.shadowed_path(path) for path in job.expanded_output]
if job.benchmark:
expanded_output.append(job.benchmark)
if not ignore_missing_output:
try:
wait_for_files(
expanded_output,
latency_wait=wait,
force_stay_on_remote=force_stay_on_remote,
ignore_pipe=True,
)
except IOError as e:
raise MissingOutputException(
str(e) + "\nThis might be due to "
"filesystem latency. If that is the case, consider to increase the "
"wait time with --latency-wait.",
rule=job.rule,
)
# Ensure that outputs are of the correct type (those flagged with directory()
# are directories and not files and vice versa).
for f in expanded_output:
if (f.is_directory and not os.path.isdir(f)) or (
os.path.isdir(f) and not f.is_directory
):
raise ImproperOutputException(job.rule, [f])
# It is possible, due to archive expansion or cluster clock skew, that
# the files appear older than the input. But we know they must be new,
# so touch them to update timestamps. This also serves to touch outputs
# when using the --touch flag.
# Note that if the input files somehow have a future date then this will
# not currently be spotted and the job will always be re-run.
if not no_touch:
for f in expanded_output:
# This won't create normal files if missing, but will create
# the flag file for directories.
if f.exists_local:
f.touch()
def unshadow_output(self, job, only_log=False):
""" Move files from shadow directory to real output paths. """
if not job.shadow_dir or not job.expanded_output:
return
files = job.log if only_log else chain(job.expanded_output, job.log)
for real_output in files:
shadow_output = job.shadowed_path(real_output).file
# Remake absolute symlinks as relative
if os.path.islink(shadow_output):
dest = os.readlink(shadow_output)
if os.path.isabs(dest):
rel_dest = os.path.relpath(dest, job.shadow_dir)
os.remove(shadow_output)
os.symlink(rel_dest, shadow_output)
if os.path.realpath(shadow_output) == os.path.realpath(real_output):
continue
logger.debug(
"Moving shadow output {} to destination {}".format(
shadow_output, real_output
)
)
shutil.move(shadow_output, real_output)
shutil.rmtree(job.shadow_dir)
def check_periodic_wildcards(self, job):
"""Raise an exception if a wildcard of the given job appears to be periodic,
indicating a cyclic dependency."""
for wildcard, value in job.wildcards_dict.items():
periodic_substring = self.periodic_wildcard_detector.is_periodic(value)
if periodic_substring is not None:
raise PeriodicWildcardError(
"The value {} in wildcard {} is periodically repeated ({}). "
"This would lead to an infinite recursion. "
"To avoid this, e.g. restrict the wildcards in this rule to certain values.".format(
periodic_substring, wildcard, value
),
rule=job.rule,
)
def handle_protected(self, job):
""" Write-protect output files that are marked with protected(). """
for f in job.expanded_output:
if f in job.protected_output:
logger.info("Write-protecting output file {}.".format(f))
f.protect()
def handle_touch(self, job):
""" Touches those output files that are marked for touching. """
for f in job.expanded_output:
if f in job.touch_output:
f = job.shadowed_path(f)
logger.info("Touching output file {}.".format(f))
f.touch_or_create()
assert os.path.exists(f)
def temp_input(self, job):
for job_, files in self.dependencies[job].items():
for f in filter(job_.temp_output.__contains__, files):
yield f
def temp_size(self, job):
"""Return the total size of temporary input files of the job.
If none, return 0.
"""
return sum(f.size for f in self.temp_input(job))
def handle_temp(self, job):
""" Remove temp files if they are no longer needed. Update temp_mtimes. """
if self.notemp:
return
is_temp = lambda f: is_flagged(f, "temp")
# handle temp input
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
# temp input
for job_, files in self.dependencies[job].items():
tempfiles = set(f for f in job_.expanded_output if is_temp(f))
yield from filterfalse(partial(needed, job_), tempfiles & files)
# temp output
if not job.dynamic_output and (
job not in self.targetjobs or job.rule.name == self.workflow.first_rule
):
tempfiles = (
f
for f in job.expanded_output
if is_temp(f) and f not in self.targetfiles
)
yield from filterfalse(partial(needed, job), tempfiles)
for f in unneeded_files():
logger.info("Removing temporary output file {}.".format(f))
f.remove(remove_non_empty_dir=True)
def handle_log(self, job, upload_remote=True):
for f in job.log:
if not f.exists_local:
# If log file was not created during job, create an empty one.
f.touch_or_create()
if upload_remote and f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
def handle_remote(self, job, upload=True):
""" Remove local files if they are no longer needed and upload. """
if upload:
# handle output files
files = list(job.expanded_output)
if job.benchmark:
files.append(job.benchmark)
for f in files:
if f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
remote_mtime = f.mtime
# immediately force local mtime to match remote,
# since conversions from S3 headers are not 100% reliable
# without this, newness comparisons may fail down the line
f.touch(times=(remote_mtime, remote_mtime))
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
if not self.keep_remote_local:
# handle input files
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
putative = (
lambda f: f.is_remote
and not f.protected
and not f.should_keep_local
)
generated_input = set()
for job_, files in self.dependencies[job].items():
generated_input |= files
for f in filter(putative, files):
if not needed(job_, f):
yield f
for f, f_ in zip(job.output, job.rule.output):
if putative(f) and not needed(job, f) and not f in self.targetfiles:
if f in job.dynamic_output:
for f_ in job.expand_dynamic(f_):
yield f_
else:
yield f
for f in filter(putative, job.input):
# TODO what about remote inputs that are used by multiple jobs?
if f not in generated_input:
yield f
for f in unneeded_files():
if f.exists_local:
logger.info("Removing local output file: {}".format(f))
f.remove()
def jobid(self, job):
"""Return job id of given job."""
if job.is_group():
return job.jobid
else:
return self._jobid[job]
def update(
self, jobs, file=None, visited=None, skip_until_dynamic=False, progress=False
):
""" Update the DAG by adding given jobs and their dependencies. """
if visited is None:
visited = set()
producer = None
exceptions = list()
jobs = sorted(jobs, reverse=not self.ignore_ambiguity)
cycles = list()
for job in jobs:
logger.dag_debug(dict(status="candidate", job=job))
if file in job.input:
cycles.append(job)
continue
if job in visited:
cycles.append(job)
continue
try:
self.check_periodic_wildcards(job)
self.update_(
job,
visited=set(visited),
skip_until_dynamic=skip_until_dynamic,
progress=progress,
)
# TODO this might fail if a rule discarded here is needed
# elsewhere
if producer:
if job < producer or self.ignore_ambiguity:
break
elif producer is not None:
raise AmbiguousRuleException(file, job, producer)
producer = job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
exceptions.append(ex)
except RecursionError as e:
raise WorkflowError(
e,
"If building the DAG exceeds the recursion limit, "
"this is likely due to a cyclic dependency."
"E.g. you might have a sequence of rules that "
"can generate their own input. Try to make "
"the output files more specific. "
"A common pattern is to have different prefixes "
"in the output files of different rules."
+ "\nProblematic file pattern: {}".format(file)
if file
else "",
)
if producer is None:
if cycles:
job = cycles[0]
raise CyclicGraphException(job.rule, file, rule=job.rule)
if len(exceptions) > 1:
raise WorkflowError(*exceptions)
elif len(exceptions) == 1:
raise exceptions[0]
else:
logger.dag_debug(dict(status="selected", job=producer))
logger.dag_debug(
dict(
file=file,
msg="Producer found, hence exceptions are ignored.",
exception=WorkflowError(*exceptions),
)
)
n = len(self.dependencies)
if progress and n % 1000 == 0 and n and self._progress != n:
logger.info("Processed {} potential jobs.".format(n))
self._progress = n
return producer
def update_(self, job, visited=None, skip_until_dynamic=False, progress=False):
""" Update the DAG by adding the given job and its dependencies. """
if job in self.dependencies:
return
if visited is None:
visited = set()
visited.add(job)
dependencies = self.dependencies[job]
potential_dependencies = self.collect_potential_dependencies(job)
skip_until_dynamic = skip_until_dynamic and not job.dynamic_output
missing_input = set()
producer = dict()
exceptions = dict()
for file, jobs in potential_dependencies.items():
# If possible, obtain inventory information starting from
# given file and store it in the IOCache.
# This should provide faster access to existence and mtime information
# than querying file by file. If the file type does not support inventory
# information, this call is a no-op.
file.inventory()
if not jobs:
# no producing job found
if not file.exists:
# file not found, hence missing input
missing_input.add(file)
# file found, no problem
continue
try:
selected_job = self.update(
jobs,
file=file,
visited=visited,
skip_until_dynamic=skip_until_dynamic or file in job.dynamic_input,
progress=progress,
)
producer[file] = selected_job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
if not file.exists:
self.delete_job(job, recursive=False) # delete job from tree
raise ex
else:
logger.dag_debug(
dict(
file=file,
msg="No producers found, but file is present on disk.",
exception=ex,
)
)
for file, job_ in producer.items():
dependencies[job_].add(file)
self.depending[job_][job].add(file)
if self.is_batch_rule(job.rule) and self.batch.is_final:
# For the final batch, ensure that all input files from
# previous batches are present on disk.
if any(
f for f in job.input if f not in potential_dependencies and not f.exists
):
raise WorkflowError(
"Unable to execute batch {} because not all previous batches "
"have been completed before or files have been deleted.".format(
self.batch
)
)
if missing_input:
self.delete_job(job, recursive=False) # delete job from tree
raise MissingInputException(job.rule, missing_input)
if skip_until_dynamic:
self._dynamic.add(job)
def update_needrun(self):
""" Update the information whether a job needs to be executed. """
output_mintime = dict()
def update_output_mintime(job):
try:
return output_mintime[job]
except KeyError:
for job_ in chain([job], self.depending[job]):
try:
t = output_mintime[job_]
except KeyError:
t = job_.output_mintime
if t is not None:
output_mintime[job] = t
return
output_mintime[job] = None
def update_needrun(job):
reason = self.reason(job)
noinitreason = not reason
updated_subworkflow_input = self.updated_subworkflow_files.intersection(
job.input
)
if (
job not in self.omitforce
and job.rule in self.forcerules
or not self.forcefiles.isdisjoint(job.output)
):
reason.forced = True
elif updated_subworkflow_input:
reason.updated_input.update(updated_subworkflow_input)
elif job in self.targetjobs:
# TODO find a way to handle added/removed input files here?
if not job.output and not job.benchmark:
if job.input:
if job.rule.norun:
reason.updated_input_run.update(
[f for f in job.input if not f.exists]
)
else:
reason.nooutput = True
else:
reason.noio = True
else:
if job.rule in self.targetrules:
missing_output = job.missing_output()
else:
missing_output = job.missing_output(
requested=set(chain(*self.depending[job].values()))
| self.targetfiles
)
reason.missing_output.update(missing_output)
if not reason:
output_mintime_ = output_mintime.get(job)
if output_mintime_:
updated_input = [
f for f in job.input if f.exists and f.is_newer(output_mintime_)
]
reason.updated_input.update(updated_input)
if noinitreason and reason:
reason.derived = False
reason = self.reason
_needrun = self._needrun
dependencies = self.dependencies
depending = self.depending
_needrun.clear()
candidates = list(self.jobs)
# Update the output mintime of all jobs.
# We traverse them in BFS (level order) starting from target jobs.
# Then, we check output mintime of job itself and all direct descendants,
# which have already been visited in the level before.
# This way, we achieve a linear runtime.
for job in candidates:
update_output_mintime(job)
# update prior reason for all candidate jobs
for job in candidates:
update_needrun(job)
queue = list(filter(reason, candidates))
visited = set(queue)
while queue:
job = queue.pop(0)
_needrun.add(job)
for job_, files in dependencies[job].items():
missing_output = job_.missing_output(requested=files)
reason(job_).missing_output.update(missing_output)
if missing_output and not job_ in visited:
visited.add(job_)
queue.append(job_)
for job_, files in depending[job].items():
if job_ in candidates:
reason(job_).updated_input_run.update(files)
if not job_ in visited:
visited.add(job_)
queue.append(job_)
# update len including finished jobs (because they have already increased the job counter)
self._len = len(self._finished | self._needrun)
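    # Editorial note: the BFS above propagates "needrun" in both directions: upstream
    # producers are added when a needrun consumer requests output they have not produced,
    # and downstream consumers are added because their input files will be updated.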
def in_until(self, job):
"""Return whether given job has been specified via --until."""
return job.rule.name in self.untilrules or not self.untilfiles.isdisjoint(
job.output
)
def in_omitfrom(self, job):
"""Return whether given job has been specified via --omit-from."""
return job.rule.name in self.omitrules or not self.omitfiles.isdisjoint(
job.output
)
def until_jobs(self):
"""Returns a generator of jobs specified by untiljobs."""
return (job for job in self.jobs if self.in_until(job))
def omitfrom_jobs(self):
"""Returns a generator of jobs specified by omitfromjobs."""
return (job for job in self.jobs if self.in_omitfrom(job))
def downstream_of_omitfrom(self):
"""Returns the downstream of --omit-from rules or files and themselves."""
return self.bfs(self.depending, *self.omitfrom_jobs())
def delete_omitfrom_jobs(self):
"""Removes jobs downstream of jobs specified by --omit-from."""
if not self.omitrules and not self.omitfiles:
return
downstream_jobs = list(
self.downstream_of_omitfrom()
) # need to cast as list before deleting jobs
for job in downstream_jobs:
self.delete_job(job, recursive=False, add_dependencies=True)
def set_until_jobs(self):
"""Removes jobs downstream of jobs specified by --omit-from."""
if not self.untilrules and not self.untilfiles:
return
self.targetjobs = set(self.until_jobs())
def update_priority(self):
""" Update job priorities. """
prioritized = (
lambda job: job.rule in self.priorityrules
or not self.priorityfiles.isdisjoint(job.output)
)
for job in self.needrun_jobs:
self._priority[job] = job.rule.priority
for job in self.bfs(
self.dependencies,
*filter(prioritized, self.needrun_jobs),
stop=self.noneedrun_finished,
):
self._priority[job] = Job.HIGHEST_PRIORITY
def update_groups(self):
groups = dict()
for job in self.needrun_jobs:
if job.group is None:
continue
stop = lambda j: j.group != job.group
# BFS into depending needrun jobs if in same group
# Note: never go up here (into depending), because it may contain
# jobs that have been sorted out due to e.g. ruleorder.
group = self.group_job_factory.new(
job.group,
(
job
for job in self.bfs(self.dependencies, job, stop=stop)
if self.needrun(job)
),
)
# merge with previously determined groups if present
for j in group:
if j in groups:
other = groups[j]
other.merge(group)
group = other
# update assignment
for j in group:
if j not in groups:
groups[j] = group
self._group = groups
self._update_group_components()
def _update_group_components(self):
# span connected components if requested
for groupid, conn_components in groupby(
set(self._group.values()), key=lambda group: group.groupid
):
n_components = self.workflow.group_components.get(groupid, 1)
if n_components > 1:
for chunk in group_into_chunks(n_components, conn_components):
if len(chunk) > 1:
primary = chunk[0]
for secondary in chunk[1:]:
primary.merge(secondary)
for j in primary:
self._group[j] = primary
def update_ready(self, jobs=None):
"""Update information whether a job is ready to execute.
Given jobs must be needrun jobs!
"""
if jobs is None:
jobs = self.needrun_jobs
candidate_groups = set()
for job in jobs:
if not self.finished(job) and self._ready(job):
if job.group is None:
self._ready_jobs.add(job)
else:
group = self._group[job]
group.finalize()
candidate_groups.add(group)
self._ready_jobs.update(
group
for group in candidate_groups
if all(self._ready(job) for job in group)
)
def get_jobs_or_groups(self):
visited_groups = set()
for job in self.jobs:
if job.group is None:
yield job
else:
group = self._group[job]
if group in visited_groups:
continue
visited_groups.add(group)
yield group
def close_remote_objects(self):
"""Close all remote objects."""
for job in self.jobs:
if not self.needrun(job):
job.close_remote()
def postprocess(self):
"""Postprocess the DAG. This has to be invoked after any change to the
DAG topology."""
self.update_jobids()
self.update_needrun()
self.update_priority()
self.handle_pipes()
self.update_groups()
self.update_ready()
self.close_remote_objects()
self.update_checkpoint_outputs()
def handle_pipes(self):
"""Use pipes to determine job groups. Check if every pipe has exactly
one consumer"""
for job in self.needrun_jobs:
candidate_groups = set()
if job.group is not None:
candidate_groups.add(job.group)
all_depending = set()
has_pipe = False
for f in job.output:
if is_flagged(f, "pipe"):
if job.is_run:
raise WorkflowError(
"Rule defines pipe output but "
"uses a 'run' directive. This is "
"not possible for technical "
"reasons. Consider using 'shell' or "
"'script'.",
rule=job.rule,
)
has_pipe = True
depending = [
j for j, files in self.depending[job].items() if f in files
]
if len(depending) > 1:
raise WorkflowError(
"Output file {} is marked as pipe "
"but more than one job depends on "
"it. Make sure that any pipe "
"output is only consumed by one "
"job".format(f),
rule=job.rule,
)
elif len(depending) == 0:
raise WorkflowError(
"Output file {} is marked as pipe "
"but it has no consumer. This is "
"invalid because it can lead to "
"a dead lock.".format(f),
rule=job.rule,
)
depending = depending[0]
if depending.is_run:
raise WorkflowError(
"Rule consumes pipe input but "
"uses a 'run' directive. This is "
"not possible for technical "
"reasons. Consider using 'shell' or "
"'script'.",
rule=job.rule,
)
all_depending.add(depending)
if depending.group is not None:
candidate_groups.add(depending.group)
if not has_pipe:
continue
if len(candidate_groups) > 1:
raise WorkflowError(
"An output file is marked as "
"pipe, but consuming jobs "
"are part of conflicting "
"groups.",
rule=job.rule,
)
elif candidate_groups:
# extend the candidate group to all involved jobs
group = candidate_groups.pop()
else:
# generate a random unique group name
group = str(uuid.uuid4())
job.group = group
for j in all_depending:
j.group = group
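    # Editorial note: the net effect is that a pipe() producer and its single consumer
    # always share a job group (an existing group if either declares one, otherwise a
    # fresh uuid4-based one), so both jobs can be scheduled together and the pipe does
    # not deadlock.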
def _ready(self, job):
"""Return whether the given job is ready to execute."""
group = self._group.get(job, None)
if group is None:
is_external_needrun_dep = self.needrun
else:
def is_external_needrun_dep(j):
g = self._group.get(j, None)
return self.needrun(j) and (g is None or g != group)
return self._finished.issuperset(
filter(is_external_needrun_dep, self.dependencies[job])
)
def update_checkpoint_dependencies(self, jobs=None):
"""Update dependencies of checkpoints."""
updated = False
self.update_checkpoint_outputs()
if jobs is None:
jobs = [job for job in self.jobs if not self.needrun(job)]
for job in jobs:
if job.is_checkpoint:
depending = list(self.depending[job])
# re-evaluate depending jobs, replace and update DAG
for j in depending:
logger.info("Updating job {} ({}).".format(self.jobid(j), j))
newjob = j.updated()
self.replace_job(j, newjob, recursive=False)
updated = True
if updated:
# This has to be done for each checkpoint,
# otherwise, jobs may be missing in the end.
self.postprocess()
return updated
def finish(self, job, update_dynamic=True):
"""Finish a given job (e.g. remove from ready jobs, mark depending jobs
as ready)."""
try:
self._ready_jobs.remove(job)
except KeyError:
pass
if job.is_group():
jobs = job
else:
jobs = [job]
self._finished.update(jobs)
updated_dag = False
if update_dynamic:
updated_dag = self.update_checkpoint_dependencies(jobs)
# mark depending jobs as ready
# skip jobs that are marked as until jobs
self.update_ready(
j
for job in jobs
for j in self.depending[job]
if not self.in_until(job) and self.needrun(j)
)
for job in jobs:
if update_dynamic and job.dynamic_output:
logger.info("Dynamically updating jobs")
newjob = self.update_dynamic(job)
if newjob:
# simulate that this job ran and was finished before
self.omitforce.add(newjob)
self._needrun.add(newjob)
self._finished.add(newjob)
updated_dag = True
self.postprocess()
self.handle_protected(newjob)
self.handle_touch(newjob)
if updated_dag:
# We might have new jobs, so we need to ensure that all conda envs
# and singularity images are set up.
if self.workflow.use_singularity:
self.pull_container_imgs()
if self.workflow.use_conda:
self.create_conda_envs()
def new_job(self, rule, targetfile=None, format_wildcards=None):
"""Create new job for given rule and (optional) targetfile.
This will reuse existing jobs with the same wildcards."""
key = (rule, targetfile)
if key in self.job_cache:
assert targetfile is not None
return self.job_cache[key]
wildcards_dict = rule.get_wildcards(targetfile)
job = self.job_factory.new(
rule,
self,
wildcards_dict=wildcards_dict,
format_wildcards=format_wildcards,
targetfile=targetfile,
)
self.cache_job(job)
return job
def cache_job(self, job):
for f in job.products:
self.job_cache[(job.rule, f)] = job
def update_dynamic(self, job):
"""Update the DAG by evaluating the output of the given job that
contains dynamic output files."""
dynamic_wildcards = job.dynamic_wildcards
if not dynamic_wildcards:
# this happens e.g. in dryrun if output is not yet present
return
depending = list(
filter(lambda job_: not self.finished(job_), self.bfs(self.depending, job))
)
newrule, non_dynamic_wildcards = job.rule.dynamic_branch(
dynamic_wildcards, input=False
)
self.specialize_rule(job.rule, newrule)
# no targetfile needed for job
newjob = self.new_job(newrule, format_wildcards=non_dynamic_wildcards)
self.replace_job(job, newjob)
for job_ in depending:
needs_update = any(
f.get_wildcard_names() & dynamic_wildcards.keys()
for f in job_.rule.dynamic_input
)
if needs_update:
newrule_ = job_.rule.dynamic_branch(dynamic_wildcards)
if newrule_ is not None:
self.specialize_rule(job_.rule, newrule_)
if not self.dynamic(job_):
logger.debug("Updating job {}.".format(job_))
newjob_ = self.new_job(
newrule_, targetfile=job_.output[0] if job_.output else None
)
unexpected_output = self.reason(
job_
).missing_output.intersection(newjob.existing_output)
if unexpected_output:
logger.warning(
"Warning: the following output files of rule {} were not "
"present when the DAG was created:\n{}".format(
newjob_.rule, unexpected_output
)
)
self.replace_job(job_, newjob_)
return newjob
def delete_job(self, job, recursive=True, add_dependencies=False):
"""Delete given job from DAG."""
if job in self.targetjobs:
self.targetjobs.remove(job)
if add_dependencies:
for _job in self.dependencies[job]:
self.targetjobs.add(_job)
for job_ in self.depending[job]:
del self.dependencies[job_][job]
del self.depending[job]
for job_ in self.dependencies[job]:
depending = self.depending[job_]
del depending[job]
if not depending and recursive:
self.delete_job(job_)
del self.dependencies[job]
if job in self._needrun:
self._len -= 1
self._needrun.remove(job)
del self._reason[job]
if job in self._finished:
self._finished.remove(job)
if job in self._dynamic:
self._dynamic.remove(job)
if job in self._ready_jobs:
self._ready_jobs.remove(job)
# remove from cache
for f in job.output:
try:
del self.job_cache[(job.rule, f)]
except KeyError:
pass
def replace_job(self, job, newjob, recursive=True):
"""Replace given job with new job."""
add_to_targetjobs = job in self.targetjobs
depending = list(self.depending[job].items())
if self.finished(job):
self._finished.add(newjob)
self.delete_job(job, recursive=recursive)
if add_to_targetjobs:
self.targetjobs.add(newjob)
self.cache_job(newjob)
self.update([newjob])
logger.debug("Replace {} with dynamic branch {}".format(job, newjob))
for job_, files in depending:
# if not job_.dynamic_input:
logger.debug("updating depending job {}".format(job_))
self.dependencies[job_][newjob].update(files)
self.depending[newjob][job_].update(files)
def specialize_rule(self, rule, newrule):
"""Specialize the given rule by inserting newrule into the DAG."""
assert newrule is not None
self.rules.add(newrule)
self.update_output_index()
def is_batch_rule(self, rule):
"""Return True if the underlying rule is to be used for batching the DAG."""
return self.batch is not None and rule.name == self.batch.rulename
def collect_potential_dependencies(self, job):
"""Collect all potential dependencies of a job. These might contain
ambiguities. The keys of the returned dict represent the files to be considered."""
dependencies = defaultdict(list)
# use a set to circumvent multiple jobs for the same file
# if user specified it twice
file2jobs = self.file2jobs
input_files = list(job.unique_input)
if self.is_batch_rule(job.rule):
# only consider the defined partition of the input files
input_batch = self.batch.get_batch(input_files)
if len(input_batch) != len(input_files):
logger.info(
"Considering only batch {} for DAG computation.\n"
"All jobs beyond the batching rule are omitted until the final batch.\n"
"Don't forget to run the other batches too.".format(self.batch)
)
input_files = input_batch
for file in input_files:
# omit the file if it comes from a subworkflow
if file in job.subworkflow_input:
continue
try:
if file in job.dependencies:
jobs = [self.new_job(job.dependencies[file], targetfile=file)]
else:
jobs = file2jobs(file)
dependencies[file].extend(jobs)
except MissingRuleException as ex:
# no dependency found
dependencies[file] = []
return dependencies
def bfs(self, direction, *jobs, stop=lambda job: False):
"""Perform a breadth-first traversal of the DAG."""
queue = list(jobs)
visited = set(queue)
while queue:
job = queue.pop(0)
if stop(job):
# stop criterion reached for this node
continue
yield job
for job_, _ in direction[job].items():
if not job_ in visited:
queue.append(job_)
visited.add(job_)
def level_bfs(self, direction, *jobs, stop=lambda job: False):
"""Perform a breadth-first traversal of the DAG, but also yield the
level together with each job."""
queue = [(job, 0) for job in jobs]
visited = set(jobs)
while queue:
job, level = queue.pop(0)
if stop(job):
# stop criterion reached for this node
continue
yield level, job
level += 1
for job_, _ in direction[job].items():
if not job_ in visited:
queue.append((job_, level))
visited.add(job_)
def dfs(self, direction, *jobs, stop=lambda job: False, post=True):
"""Perform depth-first traversal of the DAG."""
visited = set()
def _dfs(job):
"""Inner function for DFS traversal."""
if stop(job):
return
if not post:
yield job
for job_ in direction[job]:
if not job_ in visited:
visited.add(job_)
for j in _dfs(job_):
yield j
if post:
yield job
for job in jobs:
            for job_ in _dfs(job):
yield job_
def new_wildcards(self, job):
"""Return wildcards that are newly introduced in this job,
compared to its ancestors."""
new_wildcards = set(job.wildcards.items())
for job_ in self.dependencies[job]:
if not new_wildcards:
return set()
for wildcard in job_.wildcards.items():
new_wildcards.discard(wildcard)
return new_wildcards
def rule2job(self, targetrule):
"""Generate a new job from a given rule."""
if targetrule.has_wildcards():
raise WorkflowError(
"Target rules may not contain wildcards. Please specify concrete files or a rule without wildcards."
)
return self.new_job(targetrule)
def file2jobs(self, targetfile):
rules = self.output_index.match(targetfile)
jobs = []
exceptions = list()
for rule in rules:
if rule.is_producer(targetfile):
try:
jobs.append(self.new_job(rule, targetfile=targetfile))
except InputFunctionException as e:
exceptions.append(e)
if not jobs:
if exceptions:
raise exceptions[0]
raise MissingRuleException(targetfile)
return jobs
def rule_dot2(self):
dag = defaultdict(list)
visited = set()
preselect = set()
def preselect_parents(job):
for parent in self.depending[job]:
if parent in preselect:
continue
preselect.add(parent)
preselect_parents(parent)
def build_ruledag(job, key=lambda job: job.rule.name):
if job in visited:
return
visited.add(job)
deps = sorted(self.dependencies[job], key=key)
deps = [
(
group[0]
if preselect.isdisjoint(group)
else preselect.intersection(group).pop()
)
for group in (list(g) for _, g in groupby(deps, key))
]
dag[job].extend(deps)
preselect_parents(job)
for dep in deps:
build_ruledag(dep)
for job in self.targetjobs:
build_ruledag(job)
return self._dot(dag.keys(), print_wildcards=False, print_types=False, dag=dag)
def rule_dot(self):
graph = defaultdict(set)
for job in self.jobs:
graph[job.rule].update(dep.rule for dep in self.dependencies[job])
return self._dot(graph)
def dot(self):
def node2style(job):
if not self.needrun(job):
return "rounded,dashed"
if self.dynamic(job) or job.dynamic_input:
return "rounded,dotted"
return "rounded"
def format_wildcard(wildcard):
name, value = wildcard
if DYNAMIC_FILL in value:
value = "..."
return "{}: {}".format(name, value)
node2rule = lambda job: job.rule
node2label = lambda job: "\\n".join(
chain(
[job.rule.name], sorted(map(format_wildcard, self.new_wildcards(job)))
)
)
dag = {job: self.dependencies[job] for job in self.jobs}
return self._dot(
dag, node2rule=node2rule, node2style=node2style, node2label=node2label
)
def _dot(
self,
graph,
node2rule=lambda node: node,
node2style=lambda node: "rounded",
node2label=lambda node: node,
):
# color rules
huefactor = 2 / (3 * len(self.rules))
rulecolor = {
rule: "{:.2f} 0.6 0.85".format(i * huefactor)
for i, rule in enumerate(self.rules)
}
# markup
node_markup = '\t{}[label = "{}", color = "{}", style="{}"];'.format
edge_markup = "\t{} -> {}".format
# node ids
ids = {node: i for i, node in enumerate(graph)}
# calculate nodes
nodes = [
node_markup(
ids[node],
node2label(node),
rulecolor[node2rule(node)],
node2style(node),
)
for node in graph
]
# calculate edges
edges = [
edge_markup(ids[dep], ids[node])
for node, deps in graph.items()
for dep in deps
]
return textwrap.dedent(
"""\
digraph snakemake_dag {{
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, \
fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
{items}
}}\
"""
).format(items="\n".join(nodes + edges))
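    # Editorial note: the returned string is plain Graphviz source; the usual way to
    # render it is to pipe it through the dot CLI, e.g. `snakemake --dag | dot -Tsvg > dag.svg`.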
def filegraph_dot(
self,
node2rule=lambda node: node,
node2style=lambda node: "rounded",
node2label=lambda node: node,
):
# NOTE: This is code from the rule_dot method.
# This method could be split like there as well, however,
# it cannot easily reuse the _dot method due to the different node type
graph = defaultdict(set)
for job in self.jobs:
graph[job.rule].update(dep.rule for dep in self.dependencies[job])
# node ids
ids = {node: i for i, node in enumerate(graph)}
# Compute colors for rules
def hsv_to_htmlhexrgb(h, s, v):
"""Convert hsv colors to hex-encoded rgb colors usable by html."""
import colorsys
hex_r, hex_g, hex_b = (round(255 * x) for x in colorsys.hsv_to_rgb(h, s, v))
return "#{hex_r:0>2X}{hex_g:0>2X}{hex_b:0>2X}".format(
hex_r=hex_r, hex_g=hex_g, hex_b=hex_b
)
huefactor = 2 / (3 * len(self.rules))
rulecolor = {
rule: hsv_to_htmlhexrgb(i * huefactor, 0.6, 0.85)
for i, rule in enumerate(self.rules)
}
def resolve_input_functions(input_files):
"""Iterate over all input files and replace input functions
with a fixed string.
"""
files = []
for f in input_files:
if callable(f):
files.append("<input function>")
# NOTE: This is a workaround. It would be more informative
# to show the code of the input function here (if it is
# short enough). This cannot be easily done with the inspect
# module, since the line numbers in the Snakefile do not
# behave as expected. One (complicated) solution for this
# would be to find the Snakefile and directly extract the
# code of the function.
else:
files.append(repr(f).strip("'"))
return files
def html_node(node_id, node, color):
"""Assemble a html style node for graphviz"""
input_files = resolve_input_functions(node._input)
output_files = [repr(f).strip("'") for f in node._output]
input_header = (
'<b><font point-size="14">↪ input</font></b>'
if input_files
else ""
)
output_header = (
'<b><font point-size="14">output →</font></b>'
if output_files
else ""
)
html_node = [
'{node_id} [ shape=none, margin=0, label=<<table border="2" color="{color}" cellspacing="3" cellborder="0">'.format(
node_id=node_id, color=color
),
"<tr><td>",
'<b><font point-size="18">{node.name}</font></b>'.format(node=node),
"</td></tr>",
"<hr/>",
'<tr><td align="left"> {input_header} </td></tr>'.format(
input_header=input_header
),
]
for filename in sorted(input_files):
# Escape html relevant chars like '<' and '>' in filenames
# These can be added by input functions etc. and cannot be
# displayed in graphviz HTML nodes.
in_file = html.escape(filename)
html_node.extend(
[
"<tr>",
'<td align="left"><font face="monospace">{in_file}</font></td>'.format(
in_file=in_file
),
"</tr>",
]
)
html_node.append("<hr/>")
html_node.append(
'<tr><td align="right"> {output_header} </td> </tr>'.format(
output_header=output_header
)
)
for filename in sorted(output_files):
out_file = html.escape(filename)
html_node.extend(
[
"<tr>",
'<td align="left"><font face="monospace">{out_file}</font></td>'
"</tr>".format(out_file=out_file),
]
)
html_node.append("</table>>]")
return "\n".join(html_node)
nodes = [
html_node(ids[node], node, rulecolor[node2rule(node)]) for node in graph
]
# calculate edges
edge_markup = "\t{} -> {}".format
edges = [
edge_markup(ids[dep], ids[node], ids[dep], ids[node])
for node, deps in graph.items()
for dep in deps
]
return textwrap.dedent(
"""\
digraph snakemake_dag {{
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, \
fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
{items}
}}\
"""
).format(items="\n".join(nodes + edges))
def summary(self, detailed=False):
if detailed:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tinput-file(s)\tshellcmd\tstatus\tplan"
else:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tstatus\tplan"
for job in self.jobs:
output = job.rule.output if self.dynamic(job) else job.expanded_output
for f in output:
rule = self.workflow.persistence.rule(f)
rule = "-" if rule is None else rule
version = self.workflow.persistence.version(f)
version = "-" if version is None else str(version)
date = time.ctime(f.mtime) if f.exists else "-"
pending = "update pending" if self.reason(job) else "no update"
log = self.workflow.persistence.log(f)
log = "-" if log is None else ",".join(log)
input = self.workflow.persistence.input(f)
input = "-" if input is None else ",".join(input)
shellcmd = self.workflow.persistence.shellcmd(f)
shellcmd = "-" if shellcmd is None else shellcmd
# remove new line characters, leading and trailing whitespace
shellcmd = shellcmd.strip().replace("\n", "; ")
status = "ok"
if not f.exists:
status = "missing"
elif self.reason(job).updated_input:
status = "updated input files"
elif self.workflow.persistence.version_changed(job, file=f):
status = "version changed to {}".format(job.rule.version)
elif self.workflow.persistence.code_changed(job, file=f):
status = "rule implementation changed"
elif self.workflow.persistence.input_changed(job, file=f):
status = "set of input files changed"
elif self.workflow.persistence.params_changed(job, file=f):
status = "params changed"
if detailed:
yield "\t".join(
(f, date, rule, version, log, input, shellcmd, status, pending)
)
else:
yield "\t".join((f, date, rule, version, log, status, pending))
def archive(self, path):
"""Archives workflow such that it can be re-run on a different system.
Archiving includes git versioned files (i.e. Snakefiles, config files, ...),
ancestral input files and conda environments.
"""
if path.endswith(".tar"):
mode = "x"
elif path.endswith("tar.bz2"):
mode = "x:bz2"
elif path.endswith("tar.xz"):
mode = "x:xz"
elif path.endswith("tar.gz"):
mode = "x:gz"
else:
raise WorkflowError(
"Unsupported archive format "
"(supported: .tar, .tar.gz, .tar.bz2, .tar.xz)"
)
if os.path.exists(path):
raise WorkflowError("Archive already exists:\n" + path)
self.create_conda_envs(forceall=True)
try:
workdir = Path(os.path.abspath(os.getcwd()))
with tarfile.open(path, mode=mode, dereference=True) as archive:
archived = set()
def add(path):
if workdir not in Path(os.path.abspath(path)).parents:
logger.warning(
"Path {} cannot be archived: "
"not within working directory.".format(path)
)
else:
f = os.path.relpath(path)
if f not in archived:
archive.add(f)
archived.add(f)
logger.info("archived " + f)
logger.info(
"Archiving snakefiles, scripts and files under "
"version control..."
)
for f in self.workflow.get_sources():
add(f)
logger.info("Archiving external input files...")
for job in self.jobs:
# input files
for f in job.input:
if not any(
f in files for files in self.dependencies[job].values()
):
# this is an input file that is not created by any job
add(f)
logger.info("Archiving conda environments...")
envs = set()
for job in self.jobs:
if job.conda_env_file:
env_archive = job.archive_conda_env()
envs.add(env_archive)
for env in envs:
add(env)
except (Exception, BaseException) as e:
os.remove(path)
raise e
def clean(self, only_temp=False, dryrun=False):
"""Removes files generated by the workflow."""
for job in self.jobs:
for f in job.output:
if not only_temp or is_flagged(f, "temp"):
# The reason for the second check is that dangling
# symlinks fail f.exists.
if f.exists or os.path.islink(f):
if f.protected:
logger.error("Skipping write-protected file {}.".format(f))
else:
msg = "Deleting {}" if not dryrun else "Would delete {}"
logger.info(msg.format(f))
if not dryrun:
# Remove non-empty dirs if flagged as temp()
f.remove(remove_non_empty_dir=only_temp)
def list_untracked(self):
"""List files in the workdir that are not in the dag."""
used_files = set()
files_in_cwd = set()
for job in self.jobs:
used_files.update(
os.path.relpath(file)
for file in chain(job.local_input, job.local_output, job.log)
)
for root, dirs, files in os.walk(os.getcwd()):
# Ignore hidden files and don't traverse into hidden dirs
files_in_cwd.update(
[
os.path.relpath(os.path.join(root, f))
for f in files
if not f[0] == "."
]
)
dirs[:] = [d for d in dirs if not d[0] == "."]
for f in sorted(list(files_in_cwd - used_files)):
logger.info(f)
def d3dag(self, max_jobs=10000):
def node(job):
jobid = self.jobid(job)
return {
"id": jobid,
"value": {
"jobid": jobid,
"label": job.rule.name,
"rule": job.rule.name,
},
}
def edge(a, b):
return {"u": self.jobid(a), "v": self.jobid(b)}
jobs = list(self.jobs)
if len(jobs) > max_jobs:
logger.info(
"Job-DAG is too large for visualization (>{} jobs).".format(max_jobs)
)
else:
logger.d3dag(
nodes=[node(job) for job in jobs],
edges=[
edge(dep, job)
for job in jobs
for dep in self.dependencies[job]
if self.needrun(dep)
],
)
def stats(self):
rules = Counter()
rules.update(job.rule for job in self.needrun_jobs)
rules.update(job.rule for job in self.finished_jobs)
yield "Job counts:"
yield "\tcount\tjobs"
for rule, count in sorted(rules.most_common(), key=lambda item: item[0].name):
yield "\t{}\t{}".format(count, rule)
yield "\t{}".format(len(self))
def __str__(self):
return self.dot()
def __len__(self):
return self._len
|
_min_norm_2d
|
Find the minimum norm solution as a combination of two points.
This is correct only in 2D,
i.e. min_c ||\sum_i c_i x_i||_2^2  s.t.  \sum_i c_i = 1, 1 >= c_i >= 0 for all i,
and c_i + c_j = 1.0 for some i, j
|
# Credits to Ozan Sener
# https://github.com/intel-isl/MultiObjectiveOptimization
import numpy as np
import torch
class MGDASolver:
MAX_ITER = 250
STOP_CRIT = 1e-5
@staticmethod
def _min_norm_element_from2(v1v1, v1v2, v2v2):
"""
Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
        d is the distance (objective) optimized
v1v1 = <x1,x1>
v1v2 = <x1,x2>
v2v2 = <x2,x2>
"""
if v1v2 >= v1v1:
# Case: Fig 1, third column
gamma = 0.999
cost = v1v1
return gamma, cost
if v1v2 >= v2v2:
# Case: Fig 1, first column
gamma = 0.001
cost = v2v2
return gamma, cost
# Case: Fig 1, second column
gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2))
cost = v2v2 + gamma * (v1v2 - v2v2)
return gamma, cost
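    # Editorial note: the closed form follows from minimizing
    # f(c) = c^2*v1v1 + 2*c*(1 - c)*v1v2 + (1 - c)^2*v2v2 over c,
    # whose stationary point is c = (v2v2 - v1v2) / (v1v1 + v2v2 - 2*v1v2);
    # the 0.999 / 0.001 branches clip c when that optimum falls outside (0, 1).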
# MASKED: _min_norm_2d function (lines 36-70)
@staticmethod
def _projection2simplex(y):
"""
Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i
"""
m = len(y)
sorted_y = np.flip(np.sort(y), axis=0)
tmpsum = 0.0
tmax_f = (np.sum(y) - 1.0) / m
for i in range(m - 1):
tmpsum += sorted_y[i]
tmax = (tmpsum - 1) / (i + 1.0)
if tmax > sorted_y[i + 1]:
tmax_f = tmax
break
return np.maximum(y - tmax_f, np.zeros(y.shape))
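    # Editorial note: this is the standard sort-based Euclidean projection onto the
    # probability simplex (find the threshold tmax_f such that the clipped vector sums
    # to 1). For example, y = [0.8, 0.8] projects to [0.5, 0.5].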
@staticmethod
def _next_point(cur_val, grad, n):
proj_grad = grad - (np.sum(grad) / n)
tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0]
tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0])
skippers = np.sum(tm1 < 1e-7) + np.sum(tm2 < 1e-7)
t = 1
if len(tm1[tm1 > 1e-7]) > 0:
t = np.min(tm1[tm1 > 1e-7])
if len(tm2[tm2 > 1e-7]) > 0:
t = min(t, np.min(tm2[tm2 > 1e-7]))
next_point = proj_grad * t + cur_val
next_point = MGDASolver._projection2simplex(next_point)
return next_point
@staticmethod
def find_min_norm_element(vecs: list):
"""
        Given a list of vectors (vecs), this method finds the minimum norm
        element in the convex hull, i.e. min |u|_2 s.t. u = \sum_i c_i vecs[i]
        and \sum_i c_i = 1. It is quite geometric, and the main idea is the
        fact that if d_{ij} = min |u|_2 s.t. u = c x_i + (1 - c) x_j, then the
        solution lies in (0, d_{ij}). Hence, we find the best 2-task solution
        and then run projected gradient descent until convergence.
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MGDASolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec, init_sol[2]
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[i, j] = dps[(i, j)]
while iter_count < MGDASolver.MAX_ITER:
grad_dir = -1.0 * np.dot(grad_mat, sol_vec)
new_point = MGDASolver._next_point(sol_vec, grad_dir, n)
# Re-compute the inner products for line search
v1v1 = 0.0
v1v2 = 0.0
v2v2 = 0.0
for i in range(n):
for j in range(n):
v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)]
v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)]
v2v2 += new_point[i] * new_point[j] * dps[(i, j)]
nc, nd = MGDASolver._min_norm_element_from2(v1v1.item(),
v1v2.item(),
v2v2.item())
# try:
new_sol_vec = nc * sol_vec + (1 - nc) * new_point
# except AttributeError:
# print(sol_vec)
change = new_sol_vec - sol_vec
if np.sum(np.abs(change)) < MGDASolver.STOP_CRIT:
return sol_vec, nd
            sol_vec = new_sol_vec
            iter_count += 1
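    # Editorial note: find_min_norm_element_FW below solves the same problem but replaces
    # the projected-gradient step with a Frank-Wolfe step towards the single best vertex
    # (the argmin of the linearized objective grad_mat @ sol_vec).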
@staticmethod
def find_min_norm_element_FW(vecs):
"""
        Given a list of vectors (vecs), this method finds the minimum norm
        element in the convex hull,
        i.e. min |u|_2 s.t. u = \sum_i c_i vecs[i] and \sum_i c_i = 1.
        It is quite geometric, and the main idea is the fact that if
        d_{ij} = min |u|_2 s.t. u = c x_i + (1 - c) x_j, then the solution lies
        in (0, d_{ij}). Hence, we find the best 2-task solution, and then
        run Frank-Wolfe until convergence.
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MGDASolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec, init_sol[2]
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[i, j] = dps[(i, j)]
while iter_count < MGDASolver.MAX_ITER:
t_iter = np.argmin(np.dot(grad_mat, sol_vec))
v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))
v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])
v2v2 = grad_mat[t_iter, t_iter]
nc, nd = MGDASolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc * sol_vec
new_sol_vec[t_iter] += 1 - nc
change = new_sol_vec - sol_vec
if np.sum(np.abs(change)) < MGDASolver.STOP_CRIT:
return sol_vec, nd
sol_vec = new_sol_vec
@classmethod
def get_scales(cls, grads, losses, normalization_type, tasks):
scale = {}
gn = gradient_normalizers(grads, losses, normalization_type)
for t in tasks:
for gr_i in range(len(grads[t])):
grads[t][gr_i] = grads[t][gr_i] / (gn[t] + 1e-5)
sol, min_norm = cls.find_min_norm_element([grads[t] for t in tasks])
for zi, t in enumerate(tasks):
scale[t] = float(sol[zi])
return scale
def gradient_normalizers(grads, losses, normalization_type):
gn = {}
if normalization_type == 'l2':
for t in grads:
gn[t] = torch.sqrt(
torch.stack([gr.pow(2).sum().data for gr in grads[t]]).sum())
elif normalization_type == 'loss':
for t in grads:
gn[t] = min(losses[t].mean(), 10.0)
elif normalization_type == 'loss+':
for t in grads:
gn[t] = min(losses[t].mean() * torch.sqrt(
torch.stack([gr.pow(2).sum().data for gr in grads[t]]).sum()),
10)
elif normalization_type == 'none' or normalization_type == 'eq':
for t in grads:
gn[t] = 1.0
else:
raise ValueError('ERROR: Invalid Normalization Type')
return gn
|
@staticmethod
def _min_norm_2d(vecs: list, dps):
"""
Find the minimum norm solution as combination of two points
This is correct only in 2D
i.e. min_c |\sum c_i x_i|_2^2 s.t. \sum c_i = 1, 1 >= c_i >= 0
for all i, c_i + c_j = 1.0 for some i, j
"""
dmin = 1e8
sol = 0
for i in range(len(vecs)):
for j in range(i + 1, len(vecs)):
if (i, j) not in dps:
dps[(i, j)] = 0.0
for k in range(len(vecs[i])):
dps[(i, j)] += torch.dot(vecs[i][k].view(-1),
vecs[j][k].view(-1)).detach()
dps[(j, i)] = dps[(i, j)]
if (i, i) not in dps:
dps[(i, i)] = 0.0
for k in range(len(vecs[i])):
dps[(i, i)] += torch.dot(vecs[i][k].view(-1),
vecs[i][k].view(-1)).detach()
if (j, j) not in dps:
dps[(j, j)] = 0.0
for k in range(len(vecs[i])):
dps[(j, j)] += torch.dot(vecs[j][k].view(-1),
vecs[j][k].view(-1)).detach()
c, d = MGDASolver._min_norm_element_from2(dps[(i, i)],
dps[(i, j)],
dps[(j, j)])
if d < dmin:
dmin = d
sol = [(i, j), c, d]
return sol, dps
| 36 | 70 |
# Credits to Ozan Sener
# https://github.com/intel-isl/MultiObjectiveOptimization
import numpy as np
import torch
class MGDASolver:
MAX_ITER = 250
STOP_CRIT = 1e-5
@staticmethod
def _min_norm_element_from2(v1v1, v1v2, v2v2):
"""
Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
d is the distance (objective) optimized
v1v1 = <x1,x1>
v1v2 = <x1,x2>
v2v2 = <x2,x2>
"""
if v1v2 >= v1v1:
# Case: Fig 1, third column
gamma = 0.999
cost = v1v1
return gamma, cost
if v1v2 >= v2v2:
# Case: Fig 1, first column
gamma = 0.001
cost = v2v2
return gamma, cost
# Case: Fig 1, second column
gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2))
cost = v2v2 + gamma * (v1v2 - v2v2)
return gamma, cost
@staticmethod
def _min_norm_2d(vecs: list, dps):
"""
Find the minimum norm solution as combination of two points
This is correct only in 2D
i.e. min_c |\sum c_i x_i|_2^2 s.t. \sum c_i = 1, 1 >= c_i >= 0
for all i, c_i + c_j = 1.0 for some i, j
"""
dmin = 1e8
sol = 0
for i in range(len(vecs)):
for j in range(i + 1, len(vecs)):
if (i, j) not in dps:
dps[(i, j)] = 0.0
for k in range(len(vecs[i])):
dps[(i, j)] += torch.dot(vecs[i][k].view(-1),
vecs[j][k].view(-1)).detach()
dps[(j, i)] = dps[(i, j)]
if (i, i) not in dps:
dps[(i, i)] = 0.0
for k in range(len(vecs[i])):
dps[(i, i)] += torch.dot(vecs[i][k].view(-1),
vecs[i][k].view(-1)).detach()
if (j, j) not in dps:
dps[(j, j)] = 0.0
for k in range(len(vecs[i])):
dps[(j, j)] += torch.dot(vecs[j][k].view(-1),
vecs[j][k].view(-1)).detach()
c, d = MGDASolver._min_norm_element_from2(dps[(i, i)],
dps[(i, j)],
dps[(j, j)])
if d < dmin:
dmin = d
sol = [(i, j), c, d]
return sol, dps
@staticmethod
def _projection2simplex(y):
"""
Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i
"""
m = len(y)
sorted_y = np.flip(np.sort(y), axis=0)
tmpsum = 0.0
tmax_f = (np.sum(y) - 1.0) / m
for i in range(m - 1):
tmpsum += sorted_y[i]
tmax = (tmpsum - 1) / (i + 1.0)
if tmax > sorted_y[i + 1]:
tmax_f = tmax
break
return np.maximum(y - tmax_f, np.zeros(y.shape))
@staticmethod
def _next_point(cur_val, grad, n):
proj_grad = grad - (np.sum(grad) / n)
tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0]
tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0])
skippers = np.sum(tm1 < 1e-7) + np.sum(tm2 < 1e-7)
t = 1
if len(tm1[tm1 > 1e-7]) > 0:
t = np.min(tm1[tm1 > 1e-7])
if len(tm2[tm2 > 1e-7]) > 0:
t = min(t, np.min(tm2[tm2 > 1e-7]))
next_point = proj_grad * t + cur_val
next_point = MGDASolver._projection2simplex(next_point)
return next_point
@staticmethod
def find_min_norm_element(vecs: list):
"""
Given a list of vectors (vecs), this method finds the minimum norm
element in the convex hull as min |u|_2 st. u = \sum c_i vecs[i]
and \sum c_i = 1. It is quite geometric, and the main idea is the
fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution
lies in (0, d_{i,j}). Hence, we find the best 2-task solution, and
then run the projected gradient descent until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MGDASolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec, init_sol[2]
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[i, j] = dps[(i, j)]
while iter_count < MGDASolver.MAX_ITER:
grad_dir = -1.0 * np.dot(grad_mat, sol_vec)
new_point = MGDASolver._next_point(sol_vec, grad_dir, n)
# Re-compute the inner products for line search
v1v1 = 0.0
v1v2 = 0.0
v2v2 = 0.0
for i in range(n):
for j in range(n):
v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)]
v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)]
v2v2 += new_point[i] * new_point[j] * dps[(i, j)]
nc, nd = MGDASolver._min_norm_element_from2(v1v1.item(),
v1v2.item(),
v2v2.item())
# try:
new_sol_vec = nc * sol_vec + (1 - nc) * new_point
# except AttributeError:
# print(sol_vec)
change = new_sol_vec - sol_vec
if np.sum(np.abs(change)) < MGDASolver.STOP_CRIT:
return sol_vec, nd
sol_vec = new_sol_vec
@staticmethod
def find_min_norm_element_FW(vecs):
"""
Given a list of vectors (vecs), this method finds the minimum norm
element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if
d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies
in (0, d_{i,j}). Hence, we find the best 2-task solution, and then
run the Frank-Wolfe algorithm until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MGDASolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec, init_sol[2]
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[i, j] = dps[(i, j)]
while iter_count < MGDASolver.MAX_ITER:
t_iter = np.argmin(np.dot(grad_mat, sol_vec))
v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))
v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])
v2v2 = grad_mat[t_iter, t_iter]
nc, nd = MGDASolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc * sol_vec
new_sol_vec[t_iter] += 1 - nc
change = new_sol_vec - sol_vec
if np.sum(np.abs(change)) < MGDASolver.STOP_CRIT:
return sol_vec, nd
sol_vec = new_sol_vec
@classmethod
def get_scales(cls, grads, losses, normalization_type, tasks):
scale = {}
gn = gradient_normalizers(grads, losses, normalization_type)
for t in tasks:
for gr_i in range(len(grads[t])):
grads[t][gr_i] = grads[t][gr_i] / (gn[t] + 1e-5)
sol, min_norm = cls.find_min_norm_element([grads[t] for t in tasks])
for zi, t in enumerate(tasks):
scale[t] = float(sol[zi])
return scale
def gradient_normalizers(grads, losses, normalization_type):
gn = {}
if normalization_type == 'l2':
for t in grads:
gn[t] = torch.sqrt(
torch.stack([gr.pow(2).sum().data for gr in grads[t]]).sum())
elif normalization_type == 'loss':
for t in grads:
gn[t] = min(losses[t].mean(), 10.0)
elif normalization_type == 'loss+':
for t in grads:
gn[t] = min(losses[t].mean() * torch.sqrt(
torch.stack([gr.pow(2).sum().data for gr in grads[t]]).sum()),
10)
elif normalization_type == 'none' or normalization_type == 'eq':
for t in grads:
gn[t] = 1.0
else:
raise ValueError('ERROR: Invalid Normalization Type')
return gn
|
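A minimal usage sketch of the solver above (not part of the source): it assumes the code is saved as a module named mgda_solver.py and builds a toy two-task problem to obtain the per-task scaling factors.
import torch
from mgda_solver import MGDASolver  # hypothetical module holding the class above

# Toy shared parameters and two task losses computed from them.
shared = [torch.nn.Parameter(torch.randn(4, 3)), torch.nn.Parameter(torch.randn(4))]
x = torch.randn(8, 3)
losses = {
    'task_a': (x @ shared[0].t() + shared[1]).pow(2).mean(),
    'task_b': (x @ shared[0].t() + shared[1]).abs().mean(),
}
# Per-task gradients w.r.t. the shared parameters, detached as the solver expects.
grads = {
    t: [g.detach() for g in torch.autograd.grad(loss, shared, retain_graph=True)]
    for t, loss in losses.items()
}
# The returned scales define the min-norm convex combination of the task gradients.
scales = MGDASolver.get_scales(grads, losses, 'l2', ['task_a', 'task_b'])
print(scales)  # e.g. {'task_a': 0.43, 'task_b': 0.57}; exact values depend on the random data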
refresh
|
Refresh materialized views.
First, this method finds the namespaces being replicated, by referring to the
config for schemas and tables.
Then it finds any materialized views in the namespaces.
Then it refreshes the materialized views.
|
"""This module contains logic for refreshing materialized views.
Materialized views don't get refreshed automatically after a bucardo initial
sync. This module detects them and refreshes them.
Classes exported:
MatViews: Identify materialized views and refresh them on the secondary database.
"""
import psycopg2
from psycopg2 import sql
from plugins import Plugin
class MatViews(Plugin):
"""Identify materialized views and refresh them on the secondary database.
Materialized views are identified based on the namespaces specified in the
config.
Methods exported:
refresh: find and refresh materialized views
"""
def __init__(self, cfg):
"""Create configuration settings that may not already be set.
The user can either define the relevant namespaces specifically for the
mat_views plugin, or the mat_views plugin can draw on the settings in the
bucardo section of the config. If neither exists, the script will throw an
error.
Keyword arguments:
cfg: contents of the config file as a dictionary
"""
super(MatViews, self).__init__(cfg)
# Override or inherit certain params from the parent, depending on the config.
self._set_inheritable_params('mat_views')
# MASKED: refresh function (lines 41-73)
|
def refresh(self):
"""Refresh materialized views.
First, this method finds the namespaces being replicated, by referring to the
config for schemas and tables.
Then it finds any materialized views in the namespaces.
Then it refreshes the materialized views.
"""
print('Finding materialized views.')
# 'm' is for "materialized view".
views = self._find_objects('m', self.repl_objects)
if views:
conn = psycopg2.connect(self.secondary_schema_owner_conn_pg_format)
for view in views:
print(f'Refreshing {view[0]}.{view[1]}')
query = sql.SQL('REFRESH MATERIALIZED VIEW {schema}.{table}').format(
schema=sql.Identifier(view[0]),
table=sql.Identifier(view[1])
)
try:
with conn.cursor() as cur:
cur.execute(query)
conn.commit()
except Exception:
conn.close()
raise
conn.close()
print('Done refreshing views.')
else:
print('No materialized views found.')
| 41 | 73 |
"""This module contains logic for refreshing materialized views.
Materialized views don't get refreshed automatically after a bucardo initial
sync. This module detects them and refreshes them.
Classes exported:
MatViews: Identify materialized views and refresh them on the secondary database.
"""
import psycopg2
from psycopg2 import sql
from plugins import Plugin
class MatViews(Plugin):
"""Identify materialized views and refresh them on the secondary database.
Materialized views are identified based on the namespaces specified in the
config.
Methods exported:
refresh: find and refresh materialized views
"""
def __init__(self, cfg):
"""Create configuration settings that may not already be set.
The user can either define the relevant namespaces specifically for the
mat_views plugin, or the mat_views plugin can draw on the settings in the
bucardo section of the config. If neither exists, the script will throw an
error.
Keyword arguments:
cfg: contents of the config file as a dictionary
"""
super(MatViews, self).__init__(cfg)
# Override or inherit certain params from the parent, depending on the config.
self._set_inheritable_params('mat_views')
def refresh(self):
"""Refresh materialized views.
First, this method finds the namespaces being replicated, by referring to the
config for schemas and tables.
Then it finds any materialized views in the namespaces.
Then it refreshes the materialized views.
"""
print('Finding materialized views.')
# 'm' is for "materialized view".
views = self._find_objects('m', self.repl_objects)
if views:
conn = psycopg2.connect(self.secondary_schema_owner_conn_pg_format)
for view in views:
print(f'Refreshing {view[0]}.{view[1]}')
query = sql.SQL('REFRESH MATERIALIZED VIEW {schema}.{table}').format(
schema=sql.Identifier(view[0]),
table=sql.Identifier(view[1])
)
try:
with conn.cursor() as cur:
cur.execute(query)
conn.commit()
except Exception:
conn.close()
raise
conn.close()
print('Done refreshing views.')
else:
print('No materialized views found.')
|
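The core of refresh() is psycopg2's sql composition, which quotes schema and view names safely. Below is a minimal standalone sketch of that same pattern; the connection string and the (schema, view) pairs are placeholders, not values from the source config.
import psycopg2
from psycopg2 import sql

def refresh_views(dsn, views):
    """Refresh each (schema, view) pair, quoting names with sql.Identifier."""
    conn = psycopg2.connect(dsn)
    try:
        for schema, view in views:
            query = sql.SQL('REFRESH MATERIALIZED VIEW {schema}.{view}').format(
                schema=sql.Identifier(schema),
                view=sql.Identifier(view),
            )
            with conn.cursor() as cur:
                cur.execute(query)
            conn.commit()
    finally:
        conn.close()

# Example call (placeholder connection string):
# refresh_views('dbname=app host=replica user=owner', [('public', 'daily_totals')])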
get_scattering_phase_function
|
Return the scattering phase function as a function of wavelength for the
corresponding dust type (SMC or MW). The scattering phase
function gives the angle at which the photon scatters.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
Returns
-------
g: np array (float)
g(x) scattering phase function
Raises
------
ValueError
Input x values outside of defined range
|
# -*- coding: utf-8 -*-
import numpy as np
import astropy.units as u
import pkg_resources
from astropy.io import ascii
from astropy.modeling.tabular import tabular_model
from .baseclasses import BaseAtttauVModel
from .helpers import _test_valid_x_range
__all__ = ["WG00"]
x_range_WG00 = [0.1, 3.0001]
class WG00(BaseAtttauVModel):
r"""
Attenuation curve of Witt & Gordon (2000)
Parameters
----------
tau_V: float
optical depth in V band
Raises
------
InputParameterError
Input tau_V values outside of defined range
Notes
-----
From Witt & Gordon (2000, ApJ, Volume 528, pp. 799-816)
Example:
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from dust_attenuation.radiative_transfer import WG00
fig, ax = plt.subplots(1,2, figsize=(10,6))
# generate the curves and plot them
# Use 1/microns for a better sampling
x = np.arange(0.35,10.0,0.1)/u.micron
x_Vband = 0.55 # microns
tau_Vs = [0.25,0.4,1.1,17.0,46.0]
for tau_V in tau_Vs[::-1]:
att_model = WG00(tau_V = tau_V, geometry = 'cloudy',
dust_type = 'mw', dust_distribution = 'clumpy')
ax[0].plot(x,att_model(1/x),label=r'$\tau_V$ = %.2f mag' % (tau_V))
ax[1].plot(x,att_model(1/x)/att_model(x_Vband),
label=r'$\tau_V$ = %.2f mag' % (tau_V))
ax[0].set_xlabel(r'$x$ [$\mu m^{-1}$]')
ax[0].set_ylabel(r'$Att(x)$ [mag]')
ax[1].set_xlabel(r'$x$ [$\mu m^{-1}$]')
ax[1].set_ylabel(r'$Att(x)/Att_V$')
ax[0].legend(loc='best')
ax[1].legend(loc='best')
fig.suptitle(r'CLOUDY / MW / clumpy model',size=15)
plt.tight_layout()
fig.subplots_adjust(top=0.88)
plt.show()
"""
tau_V_range = [0.25, 50.0]
x_range = x_range_WG00
def __init__(
self, tau_V, geometry="dusty", dust_type="mw", dust_distribution="clumpy"
):
"""
Load the attenuation curves for a given geometry, dust type and
dust distribution.
Parameters
----------
tau_V: float
optical depth in V band
geometry: string
'shell', 'cloudy' or 'dusty'
dust_type: string
'mw' or 'smc'
dust_distribution: string
'homogeneous' or 'clumpy'
Returns
-------
Attx: np array (float)
Att(x) attenuation curve [mag]
"""
# Ensure strings are lower cases
self.geometry = geometry.lower()
self.dust_type = dust_type.lower()
self.dust_distribution = dust_distribution.lower()
data_path = pkg_resources.resource_filename("dust_attenuation", "data/WG00/")
data = ascii.read(data_path + self.geometry + ".txt", header_start=0)
if self.dust_type == "mw":
start = 0
elif self.dust_type == "smc":
start = 25
# Column names
tau_colname = "tau"
tau_att_colname = "tau_att"
fsca_colname = "f(sca)"
fdir_colname = "f(dir)"
fesc_colname = "f(esc)"
if self.dust_distribution == "clumpy":
tau_att_colname += "_c"
fsca_colname += "_c"
fdir_colname += "_c"
fesc_colname += "_c"
elif self.dust_distribution == "homogeneous":
tau_att_colname += "_h"
fsca_colname += "_h"
fdir_colname += "_h"
fesc_colname += "_h"
tau_att_list = []
tau_list = []
fsca_list = []
fdir_list = []
fesc_list = []
len_data = len(data["lambda"])
# number of lines between 2 models
steps = 25
counter = start
while counter < len_data:
tau_att_list.append(
np.array(data[tau_att_colname][counter : counter + steps])
)
tau_list.append(np.array(data[tau_colname][counter : counter + steps]))
fsca_list.append(np.array(data[fsca_colname][counter : counter + steps]))
fdir_list.append(np.array(data[fdir_colname][counter : counter + steps]))
fesc_list.append(np.array(data[fesc_colname][counter : counter + steps]))
counter += int(2 * steps)
# Convert to np.array and take transpose to have (wvl, tau_V)
tau_att_table = np.array(tau_att_list).T
tau_table = np.array(tau_list).T
fsca_table = np.array(fsca_list).T
fdir_table = np.array(fdir_list).T
fesc_table = np.array(fesc_list).T
# wavelength grid. It is the same for all the models
wvl = np.array(data["lambda"][0:25])
self.wvl_grid = wvl
# Grid for the optical depth
tau_V_grid = np.array(
[
0.25,
0.5,
0.75,
1.0,
1.5,
2.0,
2.5,
3.0,
3.5,
4.0,
4.5,
5.0,
5.5,
6.0,
7.0,
8.0,
9.0,
10.0,
15.0,
20.0,
25.0,
30.0,
35.0,
40.0,
45.0,
50.0,
]
)
# Create a 2D tabular model for tau_att and all flux fraction
tab = tabular_model(2, name="2D_table")
# Values corresponding to the x and y grid points
gridpoints = (wvl, tau_V_grid)
self.model = tab(
gridpoints,
lookup_table=tau_att_table,
name="tau_att_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
self.tau = tab(
gridpoints,
lookup_table=tau_table,
name="tau_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
self.fsca = tab(
gridpoints,
lookup_table=fsca_table,
name="fsca_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
self.fdir = tab(
gridpoints,
lookup_table=fdir_table,
name="fdir_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
self.fesc = tab(
gridpoints,
lookup_table=fesc_table,
name="fesc_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
# In Python 2: super(WG00, self)
# In Python 3: super() but super(WG00, self) still works
super(WG00, self).__init__(tau_V=tau_V)
def evaluate(self, x, tau_V):
"""
WG00 function
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
Attx: np array (float)
Att(x) attenuation curve [mag]
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
taux = self.model(xinterp, yinterp)
# Convert optical depth to attenuation
Attx = 1.086 * taux
return Attx
def get_extinction(self, x, tau_V):
"""
Return the extinction at a given wavelength and
V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
ext: np array (float)
ext(x) extinction curve [mag]
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
return self.tau(xinterp, yinterp) * 1.086
def get_fsca(self, x, tau_V):
"""
Return the scattered flux fraction at a given wavelength and
V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
fsca: np array (float)
fsca(x) scattered flux fraction
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
return self.fsca(xinterp, yinterp)
def get_fdir(self, x, tau_V):
"""
Return the direct attenuated stellar flux fraction at a given
wavelength and V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
fdir: np array (float)
fdir(x) direct attenuated stellar flux fraction
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
return self.fdir(xinterp, yinterp)
def get_fesc(self, x, tau_V):
"""
Return the total escaping flux fraction at a given wavelength and
V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
fesc: np array (float)
fesc(x) total escaping flux fraction
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
return self.fesc(xinterp, yinterp)
def get_albedo(self, x):
"""
Return the albedo as a function of wavelength for the corresponding
dust type (SMC or MW). The albedo gives the probability a photon
is scattered from a dust grain.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
Returns
-------
albedo: np array (float)
alb(x) albedo
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
alb_MW = np.array(
[
0.320,
0.409,
0.481,
0.526,
0.542,
0.536,
0.503,
0.432,
0.371,
0.389,
0.437,
0.470,
0.486,
0.499,
0.506,
0.498,
0.502,
0.491,
0.481,
0.500,
0.473,
0.457,
0.448,
0.424,
0.400,
]
)
alb_SMC = np.array(
[
0.400,
0.449,
0.473,
0.494,
0.508,
0.524,
0.529,
0.528,
0.523,
0.520,
0.516,
0.511,
0.505,
0.513,
0.515,
0.498,
0.494,
0.489,
0.484,
0.493,
0.475,
0.465,
0.439,
0.417,
0.400,
]
)
if self.dust_type == "smc":
albedo = alb_SMC
elif self.dust_type == "mw":
albedo = alb_MW
tab = tabular_model(1, name="Tabular1D")
alb_fit = tab(
self.wvl_grid,
lookup_table=albedo,
name="albedo",
bounds_error=False,
fill_value=None,
method="linear",
)
xinterp = 1e4 * x
return alb_fit(xinterp)
# MASKED: get_scattering_phase_function function (lines 619-735)
|
def get_scattering_phase_function(self, x):
"""
Return the scattering phase function as a function of wavelength for the
corresponding dust type (SMC or MW). The scattering phase
function gives the angle at which the photon scatters.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
Returns
-------
g: np array (float)
g(x) scattering phase function
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
g_MW = np.array(
[
0.800,
0.783,
0.767,
0.756,
0.745,
0.736,
0.727,
0.720,
0.712,
0.707,
0.702,
0.697,
0.691,
0.685,
0.678,
0.646,
0.624,
0.597,
0.563,
0.545,
0.533,
0.511,
0.480,
0.445,
0.420,
]
)
g_SMC = np.array(
[
0.800,
0.783,
0.767,
0.756,
0.745,
0.736,
0.727,
0.720,
0.712,
0.707,
0.702,
0.697,
0.691,
0.685,
0.678,
0.646,
0.624,
0.597,
0.563,
0.545,
0.533,
0.511,
0.480,
0.445,
0.420,
]
)
if self.dust_type == "smc":
g = g_SMC
elif self.dust_type == "mw":
g = g_MW
tab = tabular_model(1, name="Tabular1D")
g_fit = tab(
self.wvl_grid,
lookup_table=g,
name="phase_function",
bounds_error=False,
fill_value=None,
method="linear",
)
xinterp = 1e4 * x
return g_fit(xinterp)
| 619 | 735 |
# -*- coding: utf-8 -*-
import numpy as np
import astropy.units as u
import pkg_resources
from astropy.io import ascii
from astropy.modeling.tabular import tabular_model
from .baseclasses import BaseAtttauVModel
from .helpers import _test_valid_x_range
__all__ = ["WG00"]
x_range_WG00 = [0.1, 3.0001]
class WG00(BaseAtttauVModel):
r"""
Attenuation curve of Witt & Gordon (2000)
Parameters
----------
tau_V: float
optical depth in V band
Raises
------
InputParameterError
Input tau_V values outside of defined range
Notes
-----
From Witt & Gordon (2000, ApJ, Volume 528, pp. 799-816)
Example:
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from dust_attenuation.radiative_transfer import WG00
fig, ax = plt.subplots(1,2, figsize=(10,6))
# generate the curves and plot them
# Use 1/microns for a better sampling
x = np.arange(0.35,10.0,0.1)/u.micron
x_Vband = 0.55 # microns
tau_Vs = [0.25,0.4,1.1,17.0,46.0]
for tau_V in tau_Vs[::-1]:
att_model = WG00(tau_V = tau_V, geometry = 'cloudy',
dust_type = 'mw', dust_distribution = 'clumpy')
ax[0].plot(x,att_model(1/x),label=r'$\tau_V$ = %.2f mag' % (tau_V))
ax[1].plot(x,att_model(1/x)/att_model(x_Vband),
label=r'$\tau_V$ = %.2f mag' % (tau_V))
ax[0].set_xlabel(r'$x$ [$\mu m^{-1}$]')
ax[0].set_ylabel(r'$Att(x)$ [mag]')
ax[1].set_xlabel(r'$x$ [$\mu m^{-1}$]')
ax[1].set_ylabel(r'$Att(x)/Att_V$')
ax[0].legend(loc='best')
ax[1].legend(loc='best')
fig.suptitle(r'CLOUDY / MW / clumpy model',size=15)
plt.tight_layout()
fig.subplots_adjust(top=0.88)
plt.show()
"""
tau_V_range = [0.25, 50.0]
x_range = x_range_WG00
def __init__(
self, tau_V, geometry="dusty", dust_type="mw", dust_distribution="clumpy"
):
"""
Load the attenuation curves for a given geometry, dust type and
dust distribution.
Parameters
----------
tau_V: float
optical depth in V band
geometry: string
'shell', 'cloudy' or 'dusty'
dust_type: string
'mw' or 'smc'
dust_distribution: string
'homogeneous' or 'clumpy'
Returns
-------
Attx: np array (float)
Att(x) attenuation curve [mag]
"""
# Ensure strings are lower cases
self.geometry = geometry.lower()
self.dust_type = dust_type.lower()
self.dust_distribution = dust_distribution.lower()
data_path = pkg_resources.resource_filename("dust_attenuation", "data/WG00/")
data = ascii.read(data_path + self.geometry + ".txt", header_start=0)
if self.dust_type == "mw":
start = 0
elif self.dust_type == "smc":
start = 25
# Column names
tau_colname = "tau"
tau_att_colname = "tau_att"
fsca_colname = "f(sca)"
fdir_colname = "f(dir)"
fesc_colname = "f(esc)"
if self.dust_distribution == "clumpy":
tau_att_colname += "_c"
fsca_colname += "_c"
fdir_colname += "_c"
fesc_colname += "_c"
elif self.dust_distribution == "homogeneous":
tau_att_colname += "_h"
fsca_colname += "_h"
fdir_colname += "_h"
fesc_colname += "_h"
tau_att_list = []
tau_list = []
fsca_list = []
fdir_list = []
fesc_list = []
len_data = len(data["lambda"])
# number of lines between 2 models
steps = 25
counter = start
while counter < len_data:
tau_att_list.append(
np.array(data[tau_att_colname][counter : counter + steps])
)
tau_list.append(np.array(data[tau_colname][counter : counter + steps]))
fsca_list.append(np.array(data[fsca_colname][counter : counter + steps]))
fdir_list.append(np.array(data[fdir_colname][counter : counter + steps]))
fesc_list.append(np.array(data[fesc_colname][counter : counter + steps]))
counter += int(2 * steps)
# Convert to np.array and take transpose to have (wvl, tau_V)
tau_att_table = np.array(tau_att_list).T
tau_table = np.array(tau_list).T
fsca_table = np.array(fsca_list).T
fdir_table = np.array(fdir_list).T
fesc_table = np.array(fesc_list).T
# wavelength grid. It is the same for all the models
wvl = np.array(data["lambda"][0:25])
self.wvl_grid = wvl
# Grid for the optical depth
tau_V_grid = np.array(
[
0.25,
0.5,
0.75,
1.0,
1.5,
2.0,
2.5,
3.0,
3.5,
4.0,
4.5,
5.0,
5.5,
6.0,
7.0,
8.0,
9.0,
10.0,
15.0,
20.0,
25.0,
30.0,
35.0,
40.0,
45.0,
50.0,
]
)
# Create a 2D tabular model for tau_att and all flux fraction
tab = tabular_model(2, name="2D_table")
# Values corresponding to the x and y grid points
gridpoints = (wvl, tau_V_grid)
self.model = tab(
gridpoints,
lookup_table=tau_att_table,
name="tau_att_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
self.tau = tab(
gridpoints,
lookup_table=tau_table,
name="tau_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
self.fsca = tab(
gridpoints,
lookup_table=fsca_table,
name="fsca_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
self.fdir = tab(
gridpoints,
lookup_table=fdir_table,
name="fdir_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
self.fesc = tab(
gridpoints,
lookup_table=fesc_table,
name="fesc_WG00",
bounds_error=False,
fill_value=None,
method="linear",
)
# In Python 2: super(WG00, self)
# In Python 3: super() but super(WG00, self) still works
super(WG00, self).__init__(tau_V=tau_V)
def evaluate(self, x, tau_V):
"""
WG00 function
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
Attx: np array (float)
Att(x) attenuation curve [mag]
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
taux = self.model(xinterp, yinterp)
# Convert optical depth to attenuation
Attx = 1.086 * taux
return Attx
def get_extinction(self, x, tau_V):
"""
Return the extinction at a given wavelength and
V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
ext: np array (float)
ext(x) extinction curve [mag]
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
return self.tau(xinterp, yinterp) * 1.086
def get_fsca(self, x, tau_V):
"""
Return the scattered flux fraction at a given wavelength and
V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
fsca: np array (float)
fsca(x) scattered flux fraction
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
return self.fsca(xinterp, yinterp)
def get_fdir(self, x, tau_V):
"""
Return the direct attenuated stellar flux fraction at a given
wavelength and V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
fdir: np array (float)
fdir(x) direct attenuated stellar flux fraction
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
return self.fdir(xinterp, yinterp)
def get_fesc(self, x, tau_V):
"""
Return the total escaping flux fraction at a given wavelength and
V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
fesc: np array (float)
fesc(x) total escaping flux fraction
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
n_x = len(x)
xinterp = 1e4 * x
yinterp = tau_V * np.ones(n_x)
return self.fesc(xinterp, yinterp)
def get_albedo(self, x):
"""
Return the albedo as a function of wavelength for the corresponding
dust type (SMC or MW). The albedo gives the probability a photon
is scattered from a dust grain.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
Returns
-------
albedo: np array (float)
alb(x) albedo
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
alb_MW = np.array(
[
0.320,
0.409,
0.481,
0.526,
0.542,
0.536,
0.503,
0.432,
0.371,
0.389,
0.437,
0.470,
0.486,
0.499,
0.506,
0.498,
0.502,
0.491,
0.481,
0.500,
0.473,
0.457,
0.448,
0.424,
0.400,
]
)
alb_SMC = np.array(
[
0.400,
0.449,
0.473,
0.494,
0.508,
0.524,
0.529,
0.528,
0.523,
0.520,
0.516,
0.511,
0.505,
0.513,
0.515,
0.498,
0.494,
0.489,
0.484,
0.493,
0.475,
0.465,
0.439,
0.417,
0.400,
]
)
if self.dust_type == "smc":
albedo = alb_SMC
elif self.dust_type == "mw":
albedo = alb_MW
tab = tabular_model(1, name="Tabular1D")
alb_fit = tab(
self.wvl_grid,
lookup_table=albedo,
name="albedo",
bounds_error=False,
fill_value=None,
method="linear",
)
xinterp = 1e4 * x
return alb_fit(xinterp)
def get_scattering_phase_function(self, x):
"""
Return the scattering phase function as a function of wavelength for the
corresponding dust type (SMC or MW). The scattering phase
function gives the angle at which the photon scatters.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
Returns
-------
g: np array (float)
g(x) scattering phase function
Raises
------
ValueError
Input x values outside of defined range
"""
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
# polynomial coefficients
x = x_quant.value
# check that the wavenumbers are within the defined range
_test_valid_x_range(x, self.x_range, "WG00")
# setup the ax vectors
x = np.atleast_1d(x)
g_MW = np.array(
[
0.800,
0.783,
0.767,
0.756,
0.745,
0.736,
0.727,
0.720,
0.712,
0.707,
0.702,
0.697,
0.691,
0.685,
0.678,
0.646,
0.624,
0.597,
0.563,
0.545,
0.533,
0.511,
0.480,
0.445,
0.420,
]
)
g_SMC = np.array(
[
0.800,
0.783,
0.767,
0.756,
0.745,
0.736,
0.727,
0.720,
0.712,
0.707,
0.702,
0.697,
0.691,
0.685,
0.678,
0.646,
0.624,
0.597,
0.563,
0.545,
0.533,
0.511,
0.480,
0.445,
0.420,
]
)
if self.dust_type == "smc":
g = g_SMC
elif self.dust_type == "mw":
g = g_MW
tab = tabular_model(1, name="Tabular1D")
g_fit = tab(
self.wvl_grid,
lookup_table=g,
name="phase_function",
bounds_error=False,
fill_value=None,
method="linear",
)
xinterp = 1e4 * x
return g_fit(xinterp)
|
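A short usage sketch of the dust property getters above (not part of the source). It assumes the dust_attenuation package and its bundled WG00 data files are installed (the import path is the one used in the class docstring), and uses illustrative wavelengths inside the 0.1 to 3.0 micron range.
import numpy as np
import astropy.units as u
from dust_attenuation.radiative_transfer import WG00

# Shell geometry, SMC dust, clumpy distribution; tau_V inside [0.25, 50].
att_model = WG00(tau_V=1.0, geometry='shell', dust_type='smc',
                 dust_distribution='clumpy')

wavelengths = np.array([0.15, 0.55, 2.0]) * u.micron
albedo = att_model.get_albedo(wavelengths)                 # scattering probability
g = att_model.get_scattering_phase_function(wavelengths)   # scattering phase function g(x)
fesc = att_model.get_fesc(wavelengths, tau_V=1.0)          # escaping flux fraction
print(albedo, g, fesc)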
load_raster_tile_lookup
|
Load in the preprocessed raster tile lookup.
Parameters
----------
iso3 : string
Country iso3 code.
Returns
-------
lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
|
"""
Extract CLOS / NLOS lookup.
Written by Ed Oughton.
March 2021
"""
import os
import configparser
import json
import math
import glob
import random
import numpy as np
import pandas as pd
import geopandas as gpd
import pyproj
from shapely.geometry import Point, Polygon, box, LineString
from shapely.ops import transform
import rasterio
# import networkx as nx
from rasterio.warp import calculate_default_transform, reproject, Resampling
from rasterio.mask import mask
from rasterstats import zonal_stats, gen_zonal_stats
from tqdm import tqdm
grass7bin = r'"C:\Program Files\GRASS GIS 7.8\grass78.bat"'
os.environ['GRASSBIN'] = grass7bin
os.environ['PATH'] += ';' + r"C:\Program Files\GRASS GIS 7.8\lib"
from grass_session import Session
from grass.script import core as gcore
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), "script_config.ini"))
BASE_PATH = CONFIG["file_locations"]["base_path"]
DATA_RAW = os.path.join(BASE_PATH, "raw")
DATA_INTERMEDIATE = os.path.join(BASE_PATH, "intermediate")
DATA_PROCESSED = os.path.join(BASE_PATH, "processed")
# MASKED: load_raster_tile_lookup function (lines 44-72)
def generate_grid(iso3, side_length):
"""
Generate a spatial grid for the chosen country.
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
if not os.path.exists(directory):
os.makedirs(directory)
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
return
filename = 'national_outline.shp'
path = os.path.join(DATA_INTERMEDIATE, iso3, filename)
country_outline = gpd.read_file(path, crs="epsg:4326")
country_outline.crs = "epsg:4326"
country_outline = country_outline.to_crs("epsg:3857")
xmin, ymin, xmax, ymax = country_outline.total_bounds
polygons = manually_create_grid(
xmin, ymin, xmax, ymax, side_length, side_length
)
grid = gpd.GeoDataFrame({'geometry': polygons}, crs="epsg:3857")#[:100]
intersection = gpd.overlay(grid, country_outline, how='intersection')
intersection.crs = "epsg:3857"
intersection['area_km2'] = intersection['geometry'].area / 1e6
intersection = intersection.to_crs("epsg:4326")
intersection.to_file(path_output, crs="epsg:4326")
return intersection
def manually_create_grid(xmin, ymin, xmax, ymax, length, wide):
"""
"""
cols = list(range(int(np.floor(xmin)), int(np.ceil(xmax - int(wide))), int(wide)))
rows = list(range(int(np.floor(ymin)), int(np.ceil(ymax)), int(length)))
polygons = []
for x in cols:
for y in rows:
polygons.append(
Polygon([(x, y), (x+wide, y), (x+wide, y-length), (x, y-length)])
)
return polygons
def find_tile(polygon, tile_lookup):
"""
Parameters
----------
polygon : tuple
The bounds of the modeling region.
tile_lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
Return
------
output : list
Contains the file path to the correct raster tile. Note:
only the first element is returned and if there are more than
one paths, an error is returned.
"""
output = []
poly_bbox = box(polygon[0], polygon[1], polygon[2], polygon[3])
for key, value in tile_lookup.items():
bbox = box(key[0], key[1], key[2], key[3])
if bbox.intersects(poly_bbox):
output.append(value)
if len(output) == 1:
return output[0]
elif len(output) > 1:
print('Problem with find_tile returning more than 1 path')
return output[0]
else:
print('Problem with find_tile: Unable to find raster path')
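# Illustrative call (hypothetical paths, not from the source data): with
# lookup = {(-2.0, 50.0, 0.0, 52.0): 'tiles/n50w002.tif'}, calling
# find_tile((-1.5, 50.5, -1.0, 51.0), lookup) returns 'tiles/n50w002.tif',
# because the query bounds intersect that tile's bounding box.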
def add_id_range_data_to_grid(iso3, tile_lookup, side_length):
"""
Query the Digital Elevation Model to get an estimated interdecile
range for each grid square.
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
filename = 'grid_final.shp'
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
return gpd.read_file(path_output, crs='epsg:4326')
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path = os.path.join(directory, filename)
grid = gpd.read_file(path, crs='epsg:4326')
output = []
for idx, grid_tile in grid.iterrows():
path_input = find_tile(
grid_tile['geometry'].bounds,
tile_lookup
)
stats = next(gen_zonal_stats(
grid_tile['geometry'],
path_input,
add_stats={
'interdecile_range': interdecile_range
},
nodata=0
))
id_range_m = stats['interdecile_range']
output.append({
'type': 'Feature',
'geometry': grid_tile['geometry'],
'properties': {
'id_range_m': id_range_m,
'area_km2': grid_tile['area_km2'],
# 'pop_density_km2': grid_tile['pop_densit'],
# 'population': grid_tile['population'],
}
})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
output = output.replace([np.inf, -np.inf], np.nan)
output = output[output.geometry.notnull()]
output.to_file(path_output, crs="epsg:4326")
return output
def interdecile_range(x):
"""
Get range between bottom 10% and top 10% of values.
This is from the Longley-Rice Irregular Terrain Model.
Code here: https://github.com/edwardoughton/itmlogic
Paper here: https://joss.theoj.org/papers/10.21105/joss.02266.pdf
Parameters
----------
x : list
Terrain profile values.
Returns
-------
interdecile_range : int
The terrain irregularity parameter.
"""
q90, q10 = np.percentile(x, [90, 10])
interdecile_range = int(round(q90 - q10, 0))
return interdecile_range
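# Worked example (hypothetical profile, not from the source data): for
# x = [10, 20, ..., 200] (20 samples, metres), np.percentile gives
# q90 = 181.0 and q10 = 29.0, so interdecile_range(x) returns 152.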
def estimate_terrain_deciles(grid):
"""
"""
# terrain_lookup = grid.loc[grid['area_km2'] > 1000].reset_index()
terrain_lookup = grid
terrain_lookup['decile'] = pd.qcut(terrain_lookup['id_range_m'], 10, labels=False)
terrain_lookup = terrain_lookup[['decile', 'id_range_m']]
terrain_lookup = terrain_lookup.groupby(['decile']).min()
terrain_lookup = terrain_lookup['id_range_m'].to_list()
return terrain_lookup
def select_grid_sampling_areas(iso3, grid, lut):
"""
"""
for i in range(1, 11):
if i == 1:
grid.loc[(grid['id_range_m'] < lut[1]), 'decile'] = str(i)
value_name = '0-{}'.format(str(lut[1]))
grid.loc[(grid['id_range_m'] < lut[1]), 'value'] = value_name
elif i <= 9:
grid.loc[(
grid['id_range_m'] >= lut[i-1]) &
(grid['id_range_m'] <= lut[i]), 'decile'] = str(i)
value_name = '{}-{}'.format(str(lut[i-1]), str(lut[i]))
grid.loc[(
grid['id_range_m'] >= lut[i-1]) &
(grid['id_range_m'] <= lut[i]), 'value'] = value_name
elif i == 10:
grid.loc[(grid['id_range_m'] > lut[i-1]), 'decile'] = str(i)
value_name = '>{}'.format(str(lut[i-1]))
grid.loc[(grid['id_range_m'] > lut[i-1]), 'value'] = value_name
else:
continue
np.random.seed(2)
grid = grid.loc[grid['area_km2'] > 2400].reset_index()
sampling_areas = grid.groupby(['decile']).apply(lambda x: x.sample(1)).reset_index(drop=True)
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_area')
if not os.path.exists(directory):
os.makedirs(directory)
sampling_areas.to_file(os.path.join(directory, 'sampling_areas.shp'))
sampling_areas.crs = 'epsg:4326'
return sampling_areas
def get_points(iso3, sampling_areas, tile_lookup, point_spacing):
"""
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
if not os.path.exists(directory):
os.makedirs(directory)
sampling_areas = sampling_areas.to_crs("epsg:3857")
for idx, sampling_area in sampling_areas.iterrows():
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
filename = "{}-{}".format(lon, lat)
xmin, ymin, xmax, ymax = sampling_area['geometry'].bounds
polygons = manually_create_grid(xmin, ymin, xmax, ymax, point_spacing, point_spacing)
#make geopandas dataframes
grid_sample = gpd.GeoDataFrame({'geometry': polygons}, crs="epsg:3857")
boundary = gpd.GeoDataFrame({'geometry': sampling_area['geometry']},
crs="epsg:3857", index=[0])
#only get points within the tile boundary
grid_sample = gpd.overlay(grid_sample, boundary, how='intersection')
grid_sample = grid_sample.to_crs("epsg:4326") #convert to lon lat
##sample one random point within each grid sample tile
sampling_points = find_points(iso3, grid_sample, tile_lookup, filename)#[:1]
##ensure lon/lat (epsg:4326) coordinates for viewshedding
sampling_points = sampling_points.to_crs("epsg:4326")
path_output = os.path.join(directory, filename + '.shp')
sampling_points.to_file(path_output)
return sampling_points
def find_points(iso3, grid_sample, tile_lookup, filename):
"""
"""
filename_2 = filename + '.shp'
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
path_output = os.path.join(directory, filename_2)
if os.path.exists(path_output):
return gpd.read_file(path_output, crs='epsg:4326')
output = []
for idx, grid_tile in grid_sample.iterrows():
min_x, min_y, max_x, max_y = grid_tile['geometry'].bounds
geom = Point(random.uniform(min_x, max_x), random.uniform(min_y, max_y))
output.append({
'type': 'Feature',
'geometry': geom,
'properties': {
}
})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
return output
def generate_viewsheds(iso3, sampling_areas, sampling_points):
"""
"""
sampling_areas = sampling_areas.to_crs("epsg:3857")
#set output folder
folder_out_viewsheds = os.path.join(DATA_INTERMEDIATE, iso3, 'viewsheds')
if not os.path.exists(folder_out_viewsheds):
os.makedirs(folder_out_viewsheds)
for idx, sampling_area in tqdm(sampling_areas.iterrows(),
total=sampling_areas.shape[0]):
output = []
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
area_filename = "{}-{}".format(lon, lat)
print('--Working on {}'.format(area_filename))
##load sampling points
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
points = gpd.read_file(os.path.join(directory, area_filename + '.shp'))#[:2]
##convert to lon lat to get correct raster tile
sampling_area_df = gpd.GeoDataFrame({'geometry': sampling_area['geometry']},
crs="epsg:3857", index=[0])
sampling_area_df = sampling_area_df.to_crs("epsg:4326")
for idx, item in sampling_area_df.iterrows():
#needs a loop because the data structure needs a series
path_input = find_tile(item['geometry'].bounds, tile_lookup)
for idx, point in tqdm(points.iterrows(), total=points.shape[0]):
results = []
lon = point['geometry'].representative_point().coords[0][0]
lat = point['geometry'].representative_point().coords[0][1]
filename2 = "{}-{}".format(lon, lat)
path_output = os.path.join(folder_out_viewsheds, filename2)
file_path = os.path.join(path_output, 'location', 'PERMANENT',
'viewsheds', filename2 + '.tif')
x = point['geometry'].coords[0][0]
y = point['geometry'].coords[0][1]
if not os.path.exists(file_path):
try:
viewshed((x, y), path_input, path_output, filename2, 45000, 'epsg:4326')
except:
print('--Viewshed already exists')
seen = set()
for idx, node in tqdm(points.iterrows(), total=points.shape[0]):
x2 = node['geometry'].coords[0][0]
y2 = node['geometry'].coords[0][1]
link = '{}_{}_{}_{}'.format(x, y, x2, y2)
if link in seen:
continue
dist = find_distance((x, y), (x2, y2))
if dist < 10:
continue
los = check_los(file_path, (x2, y2))
results.append({
'sampling_area': area_filename,
'point_id': filename2,
'node_id': '{}_{}'.format(x2, y2),
'distance': dist,
'id_range_m': sampling_area['id_range_m'],
'decile': sampling_area['decile'],
'los': los,
})
seen.add('{}_{}_{}_{}'.format(x, y, x2, y2))
seen.add('{}_{}_{}_{}'.format(x2, y2, x, y))
output = output + results
output = pd.DataFrame(output)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results')
if not os.path.exists(folder):
os.makedirs(folder)
output.to_csv(os.path.join(folder, area_filename + '.csv'), index=False)
def viewshed(point, path_input, path_output, tile_name, max_distance, crs):
"""
Perform a viewshed using GRASS.
Parameters
---------
point : tuple
The point being queried.
path_input : string
The file path of the digital elevation raster tile used as the viewshed input.
path_output : string
The directory path for the output folder.
tile_name : string
The name allocated to the viewshed tile.
max_distance : int
The maximum distance a path can be.
crs : string
The coordinate reference system in use.
Returns
-------
None
The viewshed raster is written as a GeoTiff under path_output rather than returned.
"""
with Session(gisdb=path_output, location="location", create_opts=crs):
# print('parse command')
# print(gcore.parse_command("g.gisenv", flags="s"))#, set="DEBUG=3"
# print('r.external')
# now link a GDAL supported raster file to a binary raster map layer,
# from any GDAL supported raster map format, with an optional title.
# The file is not imported but just registered as GRASS raster map.
gcore.run_command('r.external', input=path_input, output=tile_name, overwrite=True)
# print('r.external.out')
#write out as geotiff
gcore.run_command('r.external.out', directory='viewsheds', format="GTiff")
# print('r.region')
#manage the settings of the current geographic region
gcore.run_command('g.region', raster=tile_name)
# print('r.viewshed')
#for each point in the output that is NULL: No LOS
gcore.run_command('r.viewshed', #flags='e',
input=tile_name,
output='{}.tif'.format(tile_name),
coordinate= [point[0], point[1]],
observer_elevation=30,
target_elevation=30,
memory=5000,
overwrite=True,
quiet=True,
max_distance=max_distance,
# verbose=True
)
def check_los(path_input, point):
"""
Find potential LOS high points.
Parameters
----------
path_input : string
File path for the digital elevation raster tile.
point : tuple
Coordinate point being queried.
Returns
-------
los : string
The Line of Sight (los) of the path queried.
"""
with rasterio.open(path_input) as src:
x = point[0]
y = point[1]
for val in src.sample([(x, y)]):
if np.isnan(val):
# print('is nan: {} therefore nlos'.format(val))
los = 'nlos'
return los
else:
# print('is not nan: {} therefore los'.format(val))
los ='clos'
return los
def find_distance(point1, point2):
"""
"""
point1 = Point(point1)
point1 = gpd.GeoDataFrame({'geometry': [point1]}, index=[0])
point1 = point1.set_crs('epsg:4326')
point1 = point1.to_crs('epsg:3857')
point2 = Point(point2)
point2 = gpd.GeoDataFrame({'geometry': [point2]}, index=[0])
point2 = point2.set_crs('epsg:4326')
point2 = point2.to_crs('epsg:3857')
dist = LineString([
(point1['geometry'][0].coords[0][0], point1['geometry'][0].coords[0][1]),
(point2['geometry'][0].coords[0][0], point2['geometry'][0].coords[0][1])
]).length
return dist
def collect_results(iso3, sampling_areas):
"""
"""
sampling_areas = sampling_areas.to_crs("epsg:3857")#[:1]
output = []
#set output folder
for idx, sampling_area in sampling_areas.iterrows():
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
filename = "{}-{}".format(lon, lat)
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results')
data = pd.read_csv(os.path.join(directory, filename + '.csv'))
seen = set()
interval_size = 2500
for distance_lower in range(0, 45000, interval_size):
distance_upper = distance_lower + interval_size
clos = 0
nlos = 0
for idx, item in data.iterrows():
path_id = '{}_{}_{}'.format(
item['point_id'],
item['node_id'],
item['distance']
)
if not path_id in seen:
if item['distance'] < distance_upper:
if item['los'] == 'clos':
clos += 1
elif item['los'] == 'nlos':
nlos += 1
else:
print('Did not recognize los')
seen.add(path_id)
if clos > 0:
clos_probability = (clos / (clos + nlos))
else:
clos_probability = 'no data'
if nlos > 0:
nlos_probability = (nlos / (clos + nlos))
else:
nlos_probability = 'no data'
output.append({
'decile': item['decile'],
'id_range_m': item['id_range_m'],
'distance_lower': distance_lower,
'distance_upper': distance_upper,
'total_samples': clos + nlos,
'clos_probability': clos_probability,
'nlos_probability': nlos_probability,
})
output = pd.DataFrame(output)
folder = os.path.join(DATA_INTERMEDIATE, iso3)
output.to_csv(os.path.join(folder, 'los_lookup.csv'), index=False)
if __name__ == "__main__":
countries = [
("PER", 5e4, 25e2),
("IDN", 5e4, 25e2),
]
for country in countries:
iso3 = country[0]
side_length = country[1]
point_spacing = country[2]
##Load the raster tile lookup
tile_lookup = load_raster_tile_lookup(iso3)
##Generate grids
generate_grid(iso3, side_length) #1e5
# ##Add interdecile range to grid
grid = add_id_range_data_to_grid(iso3, tile_lookup, side_length)
##Get the terrain deciles
terrain_values = estimate_terrain_deciles(grid)
##Get the grid tile samples
sampling_areas = select_grid_sampling_areas(iso3, grid, terrain_values)#[:1]
##Generate the terrain lookup
sampling_points = get_points(iso3, sampling_areas, tile_lookup, point_spacing)#[:1]
##Process viewsheds
generate_viewsheds(iso3, sampling_areas, sampling_points)
## Collect results
collect_results(iso3, sampling_areas)
|
def load_raster_tile_lookup(iso3):
"""
Load in the preprocessed raster tile lookup.
Parameters
----------
iso3 : string
Country iso3 code.
Returns
-------
lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
"""
path = os.path.join(DATA_INTERMEDIATE, iso3, 'raster_lookup.csv')
data = pd.read_csv(path)
    data = data.to_dict('records')
lookup = {}
for item in data:
coords = (item['x1'], item['y1'], item['x2'], item['y2'])
lookup[coords] = item['path']
return lookup
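# Illustrative sketch (editor's addition, not part of the original row):
# raster_lookup.csv is assumed to hold x1, y1, x2, y2 bounding-box columns plus
# a path column, so the returned dict maps tile bounds to file paths.
# The bounds and path below are made up.
example_lookup = {
    (-81.0, -18.5, -68.0, 0.0): 'data/intermediate/PER/tile_0.tif',
}
example_path = example_lookup[(-81.0, -18.5, -68.0, 0.0)]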
| 44 | 72 |
"""
Extract CLOS / NLOS lookup.
Written by Ed Oughton.
March 2021
"""
import os
import configparser
import json
import math
import glob
import random
import numpy as np
import pandas as pd
import geopandas as gpd
import pyproj
from shapely.geometry import Point, Polygon, box, LineString
from shapely.ops import transform
import rasterio
# import networkx as nx
from rasterio.warp import calculate_default_transform, reproject, Resampling
from rasterio.mask import mask
from rasterstats import zonal_stats, gen_zonal_stats
from tqdm import tqdm
grass7bin = r'"C:\Program Files\GRASS GIS 7.8\grass78.bat"'
os.environ['GRASSBIN'] = grass7bin
os.environ['PATH'] += ';' + r"C:\Program Files\GRASS GIS 7.8\lib"
from grass_session import Session
from grass.script import core as gcore
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), "script_config.ini"))
BASE_PATH = CONFIG["file_locations"]["base_path"]
DATA_RAW = os.path.join(BASE_PATH, "raw")
DATA_INTERMEDIATE = os.path.join(BASE_PATH, "intermediate")
DATA_PROCESSED = os.path.join(BASE_PATH, "processed")
def load_raster_tile_lookup(iso3):
"""
Load in the preprocessed raster tile lookup.
Parameters
----------
iso3 : string
Country iso3 code.
Returns
-------
lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
"""
path = os.path.join(DATA_INTERMEDIATE, iso3, 'raster_lookup.csv')
data = pd.read_csv(path)
    data = data.to_dict('records')
lookup = {}
for item in data:
coords = (item['x1'], item['y1'], item['x2'], item['y2'])
lookup[coords] = item['path']
return lookup
def generate_grid(iso3, side_length):
"""
Generate a spatial grid for the chosen country.
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
if not os.path.exists(directory):
os.makedirs(directory)
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
return
filename = 'national_outline.shp'
path = os.path.join(DATA_INTERMEDIATE, iso3, filename)
country_outline = gpd.read_file(path, crs="epsg:4326")
country_outline.crs = "epsg:4326"
country_outline = country_outline.to_crs("epsg:3857")
xmin, ymin, xmax, ymax = country_outline.total_bounds
polygons = manually_create_grid(
xmin, ymin, xmax, ymax, side_length, side_length
)
grid = gpd.GeoDataFrame({'geometry': polygons}, crs="epsg:3857")#[:100]
intersection = gpd.overlay(grid, country_outline, how='intersection')
intersection.crs = "epsg:3857"
intersection['area_km2'] = intersection['geometry'].area / 1e6
intersection = intersection.to_crs("epsg:4326")
intersection.to_file(path_output, crs="epsg:4326")
return intersection
def manually_create_grid(xmin, ymin, xmax, ymax, length, wide):
"""
"""
cols = list(range(int(np.floor(xmin)), int(np.ceil(xmax - int(wide))), int(wide)))
rows = list(range(int(np.floor(ymin)), int(np.ceil(ymax)), int(length)))
polygons = []
for x in cols:
for y in rows:
polygons.append(
Polygon([(x, y), (x+wide, y), (x+wide, y-length), (x, y-length)])
)
return polygons
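# Illustrative sketch (editor's addition): a tiny grid from the helper above.
# Bounds are arbitrary metres in epsg:3857; cells extend downward from each y.
demo_polygons = manually_create_grid(0, 0, 3000, 2000, 1000, 1000)
print(len(demo_polygons))  # 4 squares (2 columns x 2 rows)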
def find_tile(polygon, tile_lookup):
"""
Parameters
----------
polygon : tuple
The bounds of the modeling region.
tile_lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
Return
------
output : list
        Contains the file path to the matching raster tile. Note:
        only the first matching path is returned; if more than one
        tile matches, a warning is printed and the first match is used.
"""
output = []
poly_bbox = box(polygon[0], polygon[1], polygon[2], polygon[3])
for key, value in tile_lookup.items():
bbox = box(key[0], key[1], key[2], key[3])
if bbox.intersects(poly_bbox):
output.append(value)
if len(output) == 1:
return output[0]
elif len(output) > 1:
print('Problem with find_tile returning more than 1 path')
return output[0]
else:
print('Problem with find_tile: Unable to find raster path')
def add_id_range_data_to_grid(iso3, tile_lookup, side_length):
"""
Query the Digital Elevation Model to get an estimated interdecile
range for each grid square.
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
filename = 'grid_final.shp'
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
        return gpd.read_file(path_output, crs='epsg:4326')
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path = os.path.join(directory, filename)
    grid = gpd.read_file(path, crs='epsg:4326')
output = []
for idx, grid_tile in grid.iterrows():
path_input = find_tile(
grid_tile['geometry'].bounds,
tile_lookup
)
stats = next(gen_zonal_stats(
grid_tile['geometry'],
path_input,
add_stats={
'interdecile_range': interdecile_range
},
nodata=0
))
id_range_m = stats['interdecile_range']
output.append({
'type': 'Feature',
'geometry': grid_tile['geometry'],
'properties': {
'id_range_m': id_range_m,
'area_km2': grid_tile['area_km2'],
# 'pop_density_km2': grid_tile['pop_densit'],
# 'population': grid_tile['population'],
}
})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
output = output.replace([np.inf, -np.inf], np.nan)
output = output[output.geometry.notnull()]
output.to_file(path_output, crs="epsg:4326")
return output
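# Illustrative sketch (editor's addition): how gen_zonal_stats() accepts a
# custom statistic via add_stats. Each user function receives the masked pixel
# array for one geometry. The DEM path below is hypothetical.
demo_zone = box(-70.6, -13.6, -70.5, -13.5)
demo_stats = next(gen_zonal_stats(
    [demo_zone],
    'data/raw/dem_tile.tif',  # hypothetical raster path
    add_stats={'idr': lambda arr: float(np.percentile(arr, 90) - np.percentile(arr, 10))},
    nodata=0
))
print(demo_stats['idr'])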
def interdecile_range(x):
"""
Get range between bottom 10% and top 10% of values.
This is from the Longley-Rice Irregular Terrain Model.
Code here: https://github.com/edwardoughton/itmlogic
Paper here: https://joss.theoj.org/papers/10.21105/joss.02266.pdf
Parameters
----------
x : list
Terrain profile values.
Returns
-------
interdecile_range : int
The terrain irregularity parameter.
"""
q90, q10 = np.percentile(x, [90, 10])
interdecile_range = int(round(q90 - q10, 0))
return interdecile_range
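# Illustrative sketch (editor's addition): interdecile range of a made-up
# terrain profile (metres), i.e. the delta-h irregularity parameter above.
demo_profile = [120, 135, 150, 180, 240, 310, 405, 520, 660, 820]
print(interdecile_range(demo_profile))  # q90 - q10, rounded to an int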
def estimate_terrain_deciles(grid):
"""
"""
# terrain_lookup = grid.loc[grid['area_km2'] > 1000].reset_index()
terrain_lookup = grid
terrain_lookup['decile'] = pd.qcut(terrain_lookup['id_range_m'], 10, labels=False)
terrain_lookup = terrain_lookup[['decile', 'id_range_m']]
terrain_lookup = terrain_lookup.groupby(['decile']).min()
terrain_lookup = terrain_lookup['id_range_m'].to_list()
return terrain_lookup
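# Illustrative sketch (editor's addition): pd.qcut decile thresholds, mirroring
# estimate_terrain_deciles() on synthetic id_range_m values.
demo = pd.DataFrame({'id_range_m': np.arange(10, 1010, 10)})
demo['decile'] = pd.qcut(demo['id_range_m'], 10, labels=False)
demo_lut = demo.groupby('decile')['id_range_m'].min().to_list()
print(demo_lut)  # ten ascending minima, one per decile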
def select_grid_sampling_areas(iso3, grid, lut):
"""
"""
for i in range(1, 11):
if i == 1:
grid.loc[(grid['id_range_m'] < lut[1]), 'decile'] = str(i)
value_name = '0-{}'.format(str(lut[1]))
grid.loc[(grid['id_range_m'] < lut[1]), 'value'] = value_name
elif i <= 9:
grid.loc[(
grid['id_range_m'] >= lut[i-1]) &
(grid['id_range_m'] <= lut[i]), 'decile'] = str(i)
value_name = '{}-{}'.format(str(lut[i-1]), str(lut[i]))
grid.loc[(
grid['id_range_m'] >= lut[i-1]) &
(grid['id_range_m'] <= lut[i]), 'value'] = value_name
elif i == 10:
grid.loc[(grid['id_range_m'] > lut[i-1]), 'decile'] = str(i)
value_name = '>{}'.format(str(lut[i-1]))
grid.loc[(grid['id_range_m'] > lut[i-1]), 'value'] = value_name
else:
continue
np.random.seed(2)
grid = grid.loc[grid['area_km2'] > 2400].reset_index()
sampling_areas = grid.groupby(['decile']).apply(lambda x: x.sample(1)).reset_index(drop=True)
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_area')
if not os.path.exists(directory):
os.makedirs(directory)
sampling_areas.to_file(os.path.join(directory, 'sampling_areas.shp'))
sampling_areas.crs = 'epsg:4326'
return sampling_areas
def get_points(iso3, sampling_areas, tile_lookup, point_spacing):
"""
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
if not os.path.exists(directory):
os.makedirs(directory)
sampling_areas = sampling_areas.to_crs("epsg:3857")
for idx, sampling_area in sampling_areas.iterrows():
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
filename = "{}-{}".format(lon, lat)
xmin, ymin, xmax, ymax = sampling_area['geometry'].bounds
polygons = manually_create_grid(xmin, ymin, xmax, ymax, point_spacing, point_spacing)
#make geopandas dataframes
grid_sample = gpd.GeoDataFrame({'geometry': polygons}, crs="epsg:3857")
boundary = gpd.GeoDataFrame({'geometry': sampling_area['geometry']},
crs="epsg:3857", index=[0])
#only get points within the tile boundary
grid_sample = gpd.overlay(grid_sample, boundary, how='intersection')
grid_sample = grid_sample.to_crs("epsg:4326") #convert to lon lat
        ##pick a random sample point within each grid sample tile
        sampling_points = find_points(iso3, grid_sample, tile_lookup, filename)#[:1]
        ##keep in lon-lat (epsg:4326) for the viewshed step
        sampling_points = sampling_points.to_crs("epsg:4326")
path_output = os.path.join(directory, filename + '.shp')
sampling_points.to_file(path_output)
return sampling_points
def find_points(iso3, grid_sample, tile_lookup, filename):
"""
"""
filename_2 = filename + '.shp'
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
path_output = os.path.join(directory, filename_2)
if os.path.exists(path_output):
return gpd.read_file(path_output, crs='epsg:4326')
output = []
for idx, grid_tile in grid_sample.iterrows():
min_x, min_y, max_x, max_y = grid_tile['geometry'].bounds
geom = Point(random.uniform(min_x, max_x), random.uniform(min_y, max_y))
output.append({
'type': 'Feature',
'geometry': geom,
'properties': {
}
})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
return output
def generate_viewsheds(iso3, sampling_areas, sampling_points):
"""
"""
sampling_areas = sampling_areas.to_crs("epsg:3857")
#set output folder
folder_out_viewsheds = os.path.join(DATA_INTERMEDIATE, iso3, 'viewsheds')
if not os.path.exists(folder_out_viewsheds):
os.makedirs(folder_out_viewsheds)
for idx, sampling_area in tqdm(sampling_areas.iterrows(),
total=sampling_areas.shape[0]):
output = []
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
area_filename = "{}-{}".format(lon, lat)
print('--Working on {}'.format(area_filename))
##load sampling points
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
points = gpd.read_file(os.path.join(directory, area_filename + '.shp'))#[:2]
##convert to lon lat to get correct raster tile
sampling_area_df = gpd.GeoDataFrame({'geometry': sampling_area['geometry']},
crs="epsg:3857", index=[0])
sampling_area_df = sampling_area_df.to_crs("epsg:4326")
for idx, item in sampling_area_df.iterrows():
#needs a loop because the data structure needs a series
path_input = find_tile(item['geometry'].bounds, tile_lookup)
for idx, point in tqdm(points.iterrows(), total=points.shape[0]):
results = []
lon = point['geometry'].representative_point().coords[0][0]
lat = point['geometry'].representative_point().coords[0][1]
filename2 = "{}-{}".format(lon, lat)
path_output = os.path.join(folder_out_viewsheds, filename2)
file_path = os.path.join(path_output, 'location', 'PERMANENT',
'viewsheds', filename2 + '.tif')
x = point['geometry'].coords[0][0]
y = point['geometry'].coords[0][1]
if not os.path.exists(file_path):
try:
viewshed((x, y), path_input, path_output, filename2, 45000, 'epsg:4326')
except:
print('--Viewshed already exists')
seen = set()
for idx, node in tqdm(points.iterrows(), total=points.shape[0]):
x2 = node['geometry'].coords[0][0]
y2 = node['geometry'].coords[0][1]
link = '{}_{}_{}_{}'.format(x, y, x2, y2)
if link in seen:
continue
dist = find_distance((x, y), (x2, y2))
if dist < 10:
continue
los = check_los(file_path, (x2, y2))
results.append({
'sampling_area': area_filename,
'point_id': filename2,
'node_id': '{}_{}'.format(x2, y2),
'distance': dist,
'id_range_m': sampling_area['id_range_m'],
'decile': sampling_area['decile'],
'los': los,
})
seen.add('{}_{}_{}_{}'.format(x, y, x2, y2))
seen.add('{}_{}_{}_{}'.format(x2, y2, x, y))
output = output + results
output = pd.DataFrame(output)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results')
if not os.path.exists(folder):
os.makedirs(folder)
output.to_csv(os.path.join(folder, area_filename + '.csv'), index=False)
def viewshed(point, path_input, path_output, tile_name, max_distance, crs):
"""
Perform a viewshed using GRASS.
Parameters
    ----------
    point : tuple
        The point being queried.
    path_input : string
        File path of the digital elevation raster tile used as the viewshed
        input.
path_output : string
The directory path for the output folder.
tile_name : string
The name allocated to the viewshed tile.
max_distance : int
The maximum distance a path can be.
crs : string
The coordinate reference system in use.
Returns
-------
    None
        The viewshed raster is written as <tile_name>.tif in the 'viewsheds'
        directory of the GRASS location created under path_output.
"""
with Session(gisdb=path_output, location="location", create_opts=crs):
# print('parse command')
# print(gcore.parse_command("g.gisenv", flags="s"))#, set="DEBUG=3"
# print('r.external')
# now link a GDAL supported raster file to a binary raster map layer,
# from any GDAL supported raster map format, with an optional title.
# The file is not imported but just registered as GRASS raster map.
gcore.run_command('r.external', input=path_input, output=tile_name, overwrite=True)
# print('r.external.out')
#write out as geotiff
gcore.run_command('r.external.out', directory='viewsheds', format="GTiff")
# print('r.region')
#manage the settings of the current geographic region
gcore.run_command('g.region', raster=tile_name)
# print('r.viewshed')
#for each point in the output that is NULL: No LOS
gcore.run_command('r.viewshed', #flags='e',
input=tile_name,
output='{}.tif'.format(tile_name),
coordinate= [point[0], point[1]],
observer_elevation=30,
target_elevation=30,
memory=5000,
overwrite=True,
quiet=True,
max_distance=max_distance,
# verbose=True
)
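# Illustrative sketch (editor's addition): a single call to the wrapper above.
# Paths are hypothetical and a working GRASS install (as configured at the top
# of this script) is assumed, so the call is left commented out.
# viewshed(
#     point=(-70.55, -13.55),
#     path_input='data/raw/dem_tile.tif',
#     path_output='data/intermediate/PER/viewsheds/-70.55--13.55',
#     tile_name='-70.55--13.55',
#     max_distance=45000,
#     crs='epsg:4326',
# )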
def check_los(path_input, point):
"""
    Determine whether a queried point is within line of sight, based on
    the viewshed raster generated for the transmitting point.
    Parameters
    ----------
    path_input : string
        File path of the viewshed raster tile.
point : tuple
Coordinate point being queried.
Returns
-------
los : string
The Line of Sight (los) of the path queried.
"""
with rasterio.open(path_input) as src:
x = point[0]
y = point[1]
for val in src.sample([(x, y)]):
if np.isnan(val):
# print('is nan: {} therefore nlos'.format(val))
los = 'nlos'
return los
else:
# print('is not nan: {} therefore los'.format(val))
                los = 'clos'
return los
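# Illustrative sketch (editor's addition): the sampling pattern check_los()
# relies on. dataset.sample() yields one value array per (x, y) pair; cells the
# viewshed could not see hold nodata/NaN. The raster path is hypothetical.
with rasterio.open('data/intermediate/PER/viewsheds/demo.tif') as demo_src:
    demo_val = next(demo_src.sample([(-70.52, -13.52)]))
    demo_los = 'nlos' if np.isnan(demo_val[0]) else 'clos'
print(demo_los)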
def find_distance(point1, point2):
"""
"""
point1 = Point(point1)
point1 = gpd.GeoDataFrame({'geometry': [point1]}, index=[0])
point1 = point1.set_crs('epsg:4326')
point1 = point1.to_crs('epsg:3857')
point2 = Point(point2)
point2 = gpd.GeoDataFrame({'geometry': [point2]}, index=[0])
point2 = point2.set_crs('epsg:4326')
point2 = point2.to_crs('epsg:3857')
dist = LineString([
(point1['geometry'][0].coords[0][0], point1['geometry'][0].coords[0][1]),
(point2['geometry'][0].coords[0][0], point2['geometry'][0].coords[0][1])
]).length
return dist
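# Illustrative sketch (editor's addition): a geodesic alternative to the
# find_distance() reprojection approach. Note that epsg:3857 line lengths are
# inflated away from the equator, so the two figures differ slightly.
demo_geod = pyproj.Geod(ellps='WGS84')
_, _, demo_dist_m = demo_geod.inv(-70.50, -13.50, -70.45, -13.48)
print(round(demo_dist_m))  # distance in metres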
def collect_results(iso3, sampling_areas):
"""
"""
sampling_areas = sampling_areas.to_crs("epsg:3857")#[:1]
output = []
#set output folder
for idx, sampling_area in sampling_areas.iterrows():
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
filename = "{}-{}".format(lon, lat)
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results')
data = pd.read_csv(os.path.join(directory, filename + '.csv'))
seen = set()
interval_size = 2500
for distance_lower in range(0, 45000, interval_size):
distance_upper = distance_lower + interval_size
clos = 0
nlos = 0
for idx, item in data.iterrows():
path_id = '{}_{}_{}'.format(
item['point_id'],
item['node_id'],
item['distance']
)
if not path_id in seen:
if item['distance'] < distance_upper:
if item['los'] == 'clos':
clos += 1
elif item['los'] == 'nlos':
nlos += 1
else:
print('Did not recognize los')
seen.add(path_id)
if clos > 0:
clos_probability = (clos / (clos + nlos))
else:
clos_probability = 'no data'
if nlos > 0:
nlos_probability = (nlos / (clos + nlos))
else:
nlos_probability = 'no data'
output.append({
'decile': item['decile'],
'id_range_m': item['id_range_m'],
'distance_lower': distance_lower,
'distance_upper': distance_upper,
'total_samples': clos + nlos,
'clos_probability': clos_probability,
'nlos_probability': nlos_probability,
})
output = pd.DataFrame(output)
folder = os.path.join(DATA_INTERMEDIATE, iso3)
output.to_csv(os.path.join(folder, 'los_lookup.csv'), index=False)
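# Illustrative sketch (editor's addition): an equivalent pandas aggregation of
# the per-area CSVs written by generate_viewsheds(), using pd.cut instead of
# the manual 2.5 km loop above.
def clos_probability_table(data):
    """Bucket unique paths into 2.5 km bands and compute CLOS probability."""
    data = data.drop_duplicates(subset=['point_id', 'node_id', 'distance']).copy()
    data['band'] = pd.cut(data['distance'], bins=range(0, 47500, 2500), right=False)
    grouped = data.groupby('band')['los']
    return pd.DataFrame({
        'total_samples': grouped.size(),
        'clos_probability': grouped.apply(lambda s: (s == 'clos').mean()),
    }).reset_index()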
if __name__ == "__main__":
countries = [
("PER", 5e4, 25e2),
("IDN", 5e4, 25e2),
]
for country in countries:
iso3 = country[0]
side_length = country[1]
point_spacing = country[2]
##Load the raster tile lookup
tile_lookup = load_raster_tile_lookup(iso3)
##Generate grids
generate_grid(iso3, side_length) #1e5
# ##Add interdecile range to grid
grid = add_id_range_data_to_grid(iso3, tile_lookup, side_length)
##Get the terrain deciles
terrain_values = estimate_terrain_deciles(grid)
##Get the grid tile samples
sampling_areas = select_grid_sampling_areas(iso3, grid, terrain_values)#[:1]
##Generate the terrain lookup
sampling_points = get_points(iso3, sampling_areas, tile_lookup, point_spacing)#[:1]
##Process viewsheds
generate_viewsheds(iso3, sampling_areas, sampling_points)
## Collect results
collect_results(iso3, sampling_areas)
|
find_tile
|
Parameters
----------
polygon : tuple
The bounds of the modeling region.
tile_lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
Return
------
output : list
        Contains the file path to the matching raster tile. Note:
        only the first matching path is returned; if more than one
        tile matches, a warning is printed and the first match is used.
|
"""
Extract CLOS / NLOS lookup.
Written by Ed Oughton.
March 2021
"""
import os
import configparser
import json
import math
import glob
import random
import numpy as np
import pandas as pd
import geopandas as gpd
import pyproj
from shapely.geometry import Point, Polygon, box, LineString
from shapely.ops import transform
import rasterio
# import networkx as nx
from rasterio.warp import calculate_default_transform, reproject, Resampling
from rasterio.mask import mask
from rasterstats import zonal_stats, gen_zonal_stats
from tqdm import tqdm
grass7bin = r'"C:\Program Files\GRASS GIS 7.8\grass78.bat"'
os.environ['GRASSBIN'] = grass7bin
os.environ['PATH'] += ';' + r"C:\Program Files\GRASS GIS 7.8\lib"
from grass_session import Session
from grass.script import core as gcore
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), "script_config.ini"))
BASE_PATH = CONFIG["file_locations"]["base_path"]
DATA_RAW = os.path.join(BASE_PATH, "raw")
DATA_INTERMEDIATE = os.path.join(BASE_PATH, "intermediate")
DATA_PROCESSED = os.path.join(BASE_PATH, "processed")
def load_raster_tile_lookup(iso3):
"""
Load in the preprocessed raster tile lookup.
Parameters
----------
iso3 : string
Country iso3 code.
Returns
-------
lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
"""
path = os.path.join(DATA_INTERMEDIATE, iso3, 'raster_lookup.csv')
data = pd.read_csv(path)
    data = data.to_dict('records')
lookup = {}
for item in data:
coords = (item['x1'], item['y1'], item['x2'], item['y2'])
lookup[coords] = item['path']
return lookup
def generate_grid(iso3, side_length):
"""
Generate a spatial grid for the chosen country.
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
if not os.path.exists(directory):
os.makedirs(directory)
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
return
filename = 'national_outline.shp'
path = os.path.join(DATA_INTERMEDIATE, iso3, filename)
country_outline = gpd.read_file(path, crs="epsg:4326")
country_outline.crs = "epsg:4326"
country_outline = country_outline.to_crs("epsg:3857")
xmin, ymin, xmax, ymax = country_outline.total_bounds
polygons = manually_create_grid(
xmin, ymin, xmax, ymax, side_length, side_length
)
grid = gpd.GeoDataFrame({'geometry': polygons}, crs="epsg:3857")#[:100]
intersection = gpd.overlay(grid, country_outline, how='intersection')
intersection.crs = "epsg:3857"
intersection['area_km2'] = intersection['geometry'].area / 1e6
intersection = intersection.to_crs("epsg:4326")
intersection.to_file(path_output, crs="epsg:4326")
return intersection
def manually_create_grid(xmin, ymin, xmax, ymax, length, wide):
"""
"""
cols = list(range(int(np.floor(xmin)), int(np.ceil(xmax - int(wide))), int(wide)))
rows = list(range(int(np.floor(ymin)), int(np.ceil(ymax)), int(length)))
polygons = []
for x in cols:
for y in rows:
polygons.append(
Polygon([(x, y), (x+wide, y), (x+wide, y-length), (x, y-length)])
)
return polygons
# MASKED: find_tile function (lines 132-168)
def add_id_range_data_to_grid(iso3, tile_lookup, side_length):
"""
Query the Digital Elevation Model to get an estimated interdecile
range for each grid square.
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
filename = 'grid_final.shp'
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
        return gpd.read_file(path_output, crs='epsg:4326')
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path = os.path.join(directory, filename)
    grid = gpd.read_file(path, crs='epsg:4326')
output = []
for idx, grid_tile in grid.iterrows():
path_input = find_tile(
grid_tile['geometry'].bounds,
tile_lookup
)
stats = next(gen_zonal_stats(
grid_tile['geometry'],
path_input,
add_stats={
'interdecile_range': interdecile_range
},
nodata=0
))
id_range_m = stats['interdecile_range']
output.append({
'type': 'Feature',
'geometry': grid_tile['geometry'],
'properties': {
'id_range_m': id_range_m,
'area_km2': grid_tile['area_km2'],
# 'pop_density_km2': grid_tile['pop_densit'],
# 'population': grid_tile['population'],
}
})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
output = output.replace([np.inf, -np.inf], np.nan)
output = output[output.geometry.notnull()]
output.to_file(path_output, crs="epsg:4326")
return output
def interdecile_range(x):
"""
Get range between bottom 10% and top 10% of values.
This is from the Longley-Rice Irregular Terrain Model.
Code here: https://github.com/edwardoughton/itmlogic
Paper here: https://joss.theoj.org/papers/10.21105/joss.02266.pdf
Parameters
----------
x : list
Terrain profile values.
Returns
-------
interdecile_range : int
The terrain irregularity parameter.
"""
q90, q10 = np.percentile(x, [90, 10])
interdecile_range = int(round(q90 - q10, 0))
return interdecile_range
def estimate_terrain_deciles(grid):
"""
"""
# terrain_lookup = grid.loc[grid['area_km2'] > 1000].reset_index()
terrain_lookup = grid
terrain_lookup['decile'] = pd.qcut(terrain_lookup['id_range_m'], 10, labels=False)
terrain_lookup = terrain_lookup[['decile', 'id_range_m']]
terrain_lookup = terrain_lookup.groupby(['decile']).min()
terrain_lookup = terrain_lookup['id_range_m'].to_list()
return terrain_lookup
def select_grid_sampling_areas(iso3, grid, lut):
"""
"""
for i in range(1, 11):
if i == 1:
grid.loc[(grid['id_range_m'] < lut[1]), 'decile'] = str(i)
value_name = '0-{}'.format(str(lut[1]))
grid.loc[(grid['id_range_m'] < lut[1]), 'value'] = value_name
elif i <= 9:
grid.loc[(
grid['id_range_m'] >= lut[i-1]) &
(grid['id_range_m'] <= lut[i]), 'decile'] = str(i)
value_name = '{}-{}'.format(str(lut[i-1]), str(lut[i]))
grid.loc[(
grid['id_range_m'] >= lut[i-1]) &
(grid['id_range_m'] <= lut[i]), 'value'] = value_name
elif i == 10:
grid.loc[(grid['id_range_m'] > lut[i-1]), 'decile'] = str(i)
value_name = '>{}'.format(str(lut[i-1]))
grid.loc[(grid['id_range_m'] > lut[i-1]), 'value'] = value_name
else:
continue
np.random.seed(2)
grid = grid.loc[grid['area_km2'] > 2400].reset_index()
sampling_areas = grid.groupby(['decile']).apply(lambda x: x.sample(1)).reset_index(drop=True)
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_area')
if not os.path.exists(directory):
os.makedirs(directory)
sampling_areas.to_file(os.path.join(directory, 'sampling_areas.shp'))
sampling_areas.crs = 'epsg:4326'
return sampling_areas
def get_points(iso3, sampling_areas, tile_lookup, point_spacing):
"""
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
if not os.path.exists(directory):
os.makedirs(directory)
sampling_areas = sampling_areas.to_crs("epsg:3857")
for idx, sampling_area in sampling_areas.iterrows():
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
filename = "{}-{}".format(lon, lat)
xmin, ymin, xmax, ymax = sampling_area['geometry'].bounds
polygons = manually_create_grid(xmin, ymin, xmax, ymax, point_spacing, point_spacing)
#make geopandas dataframes
grid_sample = gpd.GeoDataFrame({'geometry': polygons}, crs="epsg:3857")
boundary = gpd.GeoDataFrame({'geometry': sampling_area['geometry']},
crs="epsg:3857", index=[0])
#only get points within the tile boundary
grid_sample = gpd.overlay(grid_sample, boundary, how='intersection')
grid_sample = grid_sample.to_crs("epsg:4326") #convert to lon lat
        ##pick a random sample point within each grid sample tile
        sampling_points = find_points(iso3, grid_sample, tile_lookup, filename)#[:1]
        ##keep in lon-lat (epsg:4326) for the viewshed step
        sampling_points = sampling_points.to_crs("epsg:4326")
path_output = os.path.join(directory, filename + '.shp')
sampling_points.to_file(path_output)
return sampling_points
def find_points(iso3, grid_sample, tile_lookup, filename):
"""
"""
filename_2 = filename + '.shp'
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
path_output = os.path.join(directory, filename_2)
if os.path.exists(path_output):
return gpd.read_file(path_output, crs='epsg:4326')
output = []
for idx, grid_tile in grid_sample.iterrows():
min_x, min_y, max_x, max_y = grid_tile['geometry'].bounds
geom = Point(random.uniform(min_x, max_x), random.uniform(min_y, max_y))
output.append({
'type': 'Feature',
'geometry': geom,
'properties': {
}
})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
return output
def generate_viewsheds(iso3, sampling_areas, sampling_points):
"""
"""
sampling_areas = sampling_areas.to_crs("epsg:3857")
#set output folder
folder_out_viewsheds = os.path.join(DATA_INTERMEDIATE, iso3, 'viewsheds')
if not os.path.exists(folder_out_viewsheds):
os.makedirs(folder_out_viewsheds)
for idx, sampling_area in tqdm(sampling_areas.iterrows(),
total=sampling_areas.shape[0]):
output = []
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
area_filename = "{}-{}".format(lon, lat)
print('--Working on {}'.format(area_filename))
##load sampling points
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
points = gpd.read_file(os.path.join(directory, area_filename + '.shp'))#[:2]
##convert to lon lat to get correct raster tile
sampling_area_df = gpd.GeoDataFrame({'geometry': sampling_area['geometry']},
crs="epsg:3857", index=[0])
sampling_area_df = sampling_area_df.to_crs("epsg:4326")
for idx, item in sampling_area_df.iterrows():
#needs a loop because the data structure needs a series
path_input = find_tile(item['geometry'].bounds, tile_lookup)
for idx, point in tqdm(points.iterrows(), total=points.shape[0]):
results = []
lon = point['geometry'].representative_point().coords[0][0]
lat = point['geometry'].representative_point().coords[0][1]
filename2 = "{}-{}".format(lon, lat)
path_output = os.path.join(folder_out_viewsheds, filename2)
file_path = os.path.join(path_output, 'location', 'PERMANENT',
'viewsheds', filename2 + '.tif')
x = point['geometry'].coords[0][0]
y = point['geometry'].coords[0][1]
if not os.path.exists(file_path):
try:
viewshed((x, y), path_input, path_output, filename2, 45000, 'epsg:4326')
except:
print('--Viewshed already exists')
seen = set()
for idx, node in tqdm(points.iterrows(), total=points.shape[0]):
x2 = node['geometry'].coords[0][0]
y2 = node['geometry'].coords[0][1]
link = '{}_{}_{}_{}'.format(x, y, x2, y2)
if link in seen:
continue
dist = find_distance((x, y), (x2, y2))
if dist < 10:
continue
los = check_los(file_path, (x2, y2))
results.append({
'sampling_area': area_filename,
'point_id': filename2,
'node_id': '{}_{}'.format(x2, y2),
'distance': dist,
'id_range_m': sampling_area['id_range_m'],
'decile': sampling_area['decile'],
'los': los,
})
seen.add('{}_{}_{}_{}'.format(x, y, x2, y2))
seen.add('{}_{}_{}_{}'.format(x2, y2, x, y))
output = output + results
output = pd.DataFrame(output)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results')
if not os.path.exists(folder):
os.makedirs(folder)
output.to_csv(os.path.join(folder, area_filename + '.csv'), index=False)
def viewshed(point, path_input, path_output, tile_name, max_distance, crs):
"""
Perform a viewshed using GRASS.
Parameters
    ----------
    point : tuple
        The point being queried.
    path_input : string
        File path of the digital elevation raster tile used as the viewshed
        input.
path_output : string
The directory path for the output folder.
tile_name : string
The name allocated to the viewshed tile.
max_distance : int
The maximum distance a path can be.
crs : string
The coordinate reference system in use.
Returns
-------
    None
        The viewshed raster is written as <tile_name>.tif in the 'viewsheds'
        directory of the GRASS location created under path_output.
"""
with Session(gisdb=path_output, location="location", create_opts=crs):
# print('parse command')
# print(gcore.parse_command("g.gisenv", flags="s"))#, set="DEBUG=3"
# print('r.external')
# now link a GDAL supported raster file to a binary raster map layer,
# from any GDAL supported raster map format, with an optional title.
# The file is not imported but just registered as GRASS raster map.
gcore.run_command('r.external', input=path_input, output=tile_name, overwrite=True)
# print('r.external.out')
#write out as geotiff
gcore.run_command('r.external.out', directory='viewsheds', format="GTiff")
# print('r.region')
#manage the settings of the current geographic region
gcore.run_command('g.region', raster=tile_name)
# print('r.viewshed')
#for each point in the output that is NULL: No LOS
gcore.run_command('r.viewshed', #flags='e',
input=tile_name,
output='{}.tif'.format(tile_name),
coordinate= [point[0], point[1]],
observer_elevation=30,
target_elevation=30,
memory=5000,
overwrite=True,
quiet=True,
max_distance=max_distance,
# verbose=True
)
def check_los(path_input, point):
"""
    Determine whether a queried point is within line of sight, based on
    the viewshed raster generated for the transmitting point.
    Parameters
    ----------
    path_input : string
        File path of the viewshed raster tile.
point : tuple
Coordinate point being queried.
Returns
-------
los : string
The Line of Sight (los) of the path queried.
"""
with rasterio.open(path_input) as src:
x = point[0]
y = point[1]
for val in src.sample([(x, y)]):
if np.isnan(val):
# print('is nan: {} therefore nlos'.format(val))
los = 'nlos'
return los
else:
# print('is not nan: {} therefore los'.format(val))
                los = 'clos'
return los
def find_distance(point1, point2):
"""
"""
point1 = Point(point1)
point1 = gpd.GeoDataFrame({'geometry': [point1]}, index=[0])
point1 = point1.set_crs('epsg:4326')
point1 = point1.to_crs('epsg:3857')
point2 = Point(point2)
point2 = gpd.GeoDataFrame({'geometry': [point2]}, index=[0])
point2 = point2.set_crs('epsg:4326')
point2 = point2.to_crs('epsg:3857')
dist = LineString([
(point1['geometry'][0].coords[0][0], point1['geometry'][0].coords[0][1]),
(point2['geometry'][0].coords[0][0], point2['geometry'][0].coords[0][1])
]).length
return dist
def collect_results(iso3, sampling_areas):
"""
"""
sampling_areas = sampling_areas.to_crs("epsg:3857")#[:1]
output = []
#set output folder
for idx, sampling_area in sampling_areas.iterrows():
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
filename = "{}-{}".format(lon, lat)
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results')
data = pd.read_csv(os.path.join(directory, filename + '.csv'))
seen = set()
interval_size = 2500
for distance_lower in range(0, 45000, interval_size):
distance_upper = distance_lower + interval_size
clos = 0
nlos = 0
for idx, item in data.iterrows():
path_id = '{}_{}_{}'.format(
item['point_id'],
item['node_id'],
item['distance']
)
if not path_id in seen:
if item['distance'] < distance_upper:
if item['los'] == 'clos':
clos += 1
elif item['los'] == 'nlos':
nlos += 1
else:
print('Did not recognize los')
seen.add(path_id)
if clos > 0:
clos_probability = (clos / (clos + nlos))
else:
clos_probability = 'no data'
if nlos > 0:
nlos_probability = (nlos / (clos + nlos))
else:
nlos_probability = 'no data'
output.append({
'decile': item['decile'],
'id_range_m': item['id_range_m'],
'distance_lower': distance_lower,
'distance_upper': distance_upper,
'total_samples': clos + nlos,
'clos_probability': clos_probability,
'nlos_probability': nlos_probability,
})
output = pd.DataFrame(output)
folder = os.path.join(DATA_INTERMEDIATE, iso3)
output.to_csv(os.path.join(folder, 'los_lookup.csv'), index=False)
if __name__ == "__main__":
countries = [
("PER", 5e4, 25e2),
("IDN", 5e4, 25e2),
]
for country in countries:
iso3 = country[0]
side_length = country[1]
point_spacing = country[2]
##Load the raster tile lookup
tile_lookup = load_raster_tile_lookup(iso3)
##Generate grids
generate_grid(iso3, side_length) #1e5
# ##Add interdecile range to grid
grid = add_id_range_data_to_grid(iso3, tile_lookup, side_length)
##Get the terrain deciles
terrain_values = estimate_terrain_deciles(grid)
##Get the grid tile samples
sampling_areas = select_grid_sampling_areas(iso3, grid, terrain_values)#[:1]
##Generate the terrain lookup
sampling_points = get_points(iso3, sampling_areas, tile_lookup, point_spacing)#[:1]
##Process viewsheds
generate_viewsheds(iso3, sampling_areas, sampling_points)
## Collect results
collect_results(iso3, sampling_areas)
|
def find_tile(polygon, tile_lookup):
"""
Parameters
----------
polygon : tuple
The bounds of the modeling region.
tile_lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
Return
------
output : list
        Contains the file path to the matching raster tile. Note:
        only the first matching path is returned; if more than one
        tile matches, a warning is printed and the first match is used.
"""
output = []
poly_bbox = box(polygon[0], polygon[1], polygon[2], polygon[3])
for key, value in tile_lookup.items():
bbox = box(key[0], key[1], key[2], key[3])
if bbox.intersects(poly_bbox):
output.append(value)
if len(output) == 1:
return output[0]
elif len(output) > 1:
print('Problem with find_tile returning more than 1 path')
return output[0]
else:
print('Problem with find_tile: Unable to find raster path')
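# Illustrative sketch (editor's addition): find_tile() with a made-up lookup
# dict of (x1, y1, x2, y2) bounds -> raster path, matching the structure built
# by load_raster_tile_lookup(). Relies on the shapely `box` import in the full
# script.
demo_lookup = {
    (-72.0, -15.0, -70.0, -13.0): 'data/raw/dem_tile_a.tif',
    (-70.0, -15.0, -68.0, -13.0): 'data/raw/dem_tile_b.tif',
}
print(find_tile((-71.2, -14.2, -71.1, -14.1), demo_lookup))  # dem_tile_a.tif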
| 132 | 168 |
"""
Extract CLOS / NLOS lookup.
Written by Ed Oughton.
March 2021
"""
import os
import configparser
import json
import math
import glob
import random
import numpy as np
import pandas as pd
import geopandas as gpd
import pyproj
from shapely.geometry import Point, Polygon, box, LineString
from shapely.ops import transform
import rasterio
# import networkx as nx
from rasterio.warp import calculate_default_transform, reproject, Resampling
from rasterio.mask import mask
from rasterstats import zonal_stats, gen_zonal_stats
from tqdm import tqdm
grass7bin = r'"C:\Program Files\GRASS GIS 7.8\grass78.bat"'
os.environ['GRASSBIN'] = grass7bin
os.environ['PATH'] += ';' + r"C:\Program Files\GRASS GIS 7.8\lib"
from grass_session import Session
from grass.script import core as gcore
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), "script_config.ini"))
BASE_PATH = CONFIG["file_locations"]["base_path"]
DATA_RAW = os.path.join(BASE_PATH, "raw")
DATA_INTERMEDIATE = os.path.join(BASE_PATH, "intermediate")
DATA_PROCESSED = os.path.join(BASE_PATH, "processed")
def load_raster_tile_lookup(iso3):
"""
Load in the preprocessed raster tile lookup.
Parameters
----------
iso3 : string
Country iso3 code.
Returns
-------
lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
"""
path = os.path.join(DATA_INTERMEDIATE, iso3, 'raster_lookup.csv')
data = pd.read_csv(path)
    data = data.to_dict('records')
lookup = {}
for item in data:
coords = (item['x1'], item['y1'], item['x2'], item['y2'])
lookup[coords] = item['path']
return lookup
def generate_grid(iso3, side_length):
"""
Generate a spatial grid for the chosen country.
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
if not os.path.exists(directory):
os.makedirs(directory)
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
return
filename = 'national_outline.shp'
path = os.path.join(DATA_INTERMEDIATE, iso3, filename)
country_outline = gpd.read_file(path, crs="epsg:4326")
country_outline.crs = "epsg:4326"
country_outline = country_outline.to_crs("epsg:3857")
xmin, ymin, xmax, ymax = country_outline.total_bounds
polygons = manually_create_grid(
xmin, ymin, xmax, ymax, side_length, side_length
)
grid = gpd.GeoDataFrame({'geometry': polygons}, crs="epsg:3857")#[:100]
intersection = gpd.overlay(grid, country_outline, how='intersection')
intersection.crs = "epsg:3857"
intersection['area_km2'] = intersection['geometry'].area / 1e6
intersection = intersection.to_crs("epsg:4326")
intersection.to_file(path_output, crs="epsg:4326")
return intersection
def manually_create_grid(xmin, ymin, xmax, ymax, length, wide):
"""
"""
cols = list(range(int(np.floor(xmin)), int(np.ceil(xmax - int(wide))), int(wide)))
rows = list(range(int(np.floor(ymin)), int(np.ceil(ymax)), int(length)))
polygons = []
for x in cols:
for y in rows:
polygons.append(
Polygon([(x, y), (x+wide, y), (x+wide, y-length), (x, y-length)])
)
return polygons
def find_tile(polygon, tile_lookup):
"""
Parameters
----------
polygon : tuple
The bounds of the modeling region.
tile_lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
Return
------
output : list
        Contains the file path to the matching raster tile. Note:
        only the first matching path is returned; if more than one
        tile matches, a warning is printed and the first match is used.
"""
output = []
poly_bbox = box(polygon[0], polygon[1], polygon[2], polygon[3])
for key, value in tile_lookup.items():
bbox = box(key[0], key[1], key[2], key[3])
if bbox.intersects(poly_bbox):
output.append(value)
if len(output) == 1:
return output[0]
elif len(output) > 1:
print('Problem with find_tile returning more than 1 path')
return output[0]
else:
print('Problem with find_tile: Unable to find raster path')
def add_id_range_data_to_grid(iso3, tile_lookup, side_length):
"""
Query the Digital Elevation Model to get an estimated interdecile
range for each grid square.
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
filename = 'grid_final.shp'
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
        return gpd.read_file(path_output, crs='epsg:4326')
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path = os.path.join(directory, filename)
    grid = gpd.read_file(path, crs='epsg:4326')
output = []
for idx, grid_tile in grid.iterrows():
path_input = find_tile(
grid_tile['geometry'].bounds,
tile_lookup
)
stats = next(gen_zonal_stats(
grid_tile['geometry'],
path_input,
add_stats={
'interdecile_range': interdecile_range
},
nodata=0
))
id_range_m = stats['interdecile_range']
output.append({
'type': 'Feature',
'geometry': grid_tile['geometry'],
'properties': {
'id_range_m': id_range_m,
'area_km2': grid_tile['area_km2'],
# 'pop_density_km2': grid_tile['pop_densit'],
# 'population': grid_tile['population'],
}
})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
output = output.replace([np.inf, -np.inf], np.nan)
output = output[output.geometry.notnull()]
output.to_file(path_output, crs="epsg:4326")
return output
def interdecile_range(x):
"""
Get range between bottom 10% and top 10% of values.
This is from the Longley-Rice Irregular Terrain Model.
Code here: https://github.com/edwardoughton/itmlogic
Paper here: https://joss.theoj.org/papers/10.21105/joss.02266.pdf
Parameters
----------
x : list
Terrain profile values.
Returns
-------
interdecile_range : int
The terrain irregularity parameter.
"""
q90, q10 = np.percentile(x, [90, 10])
interdecile_range = int(round(q90 - q10, 0))
return interdecile_range
def estimate_terrain_deciles(grid):
"""
"""
# terrain_lookup = grid.loc[grid['area_km2'] > 1000].reset_index()
terrain_lookup = grid
terrain_lookup['decile'] = pd.qcut(terrain_lookup['id_range_m'], 10, labels=False)
terrain_lookup = terrain_lookup[['decile', 'id_range_m']]
terrain_lookup = terrain_lookup.groupby(['decile']).min()
terrain_lookup = terrain_lookup['id_range_m'].to_list()
return terrain_lookup
def select_grid_sampling_areas(iso3, grid, lut):
"""
"""
for i in range(1, 11):
if i == 1:
grid.loc[(grid['id_range_m'] < lut[1]), 'decile'] = str(i)
value_name = '0-{}'.format(str(lut[1]))
grid.loc[(grid['id_range_m'] < lut[1]), 'value'] = value_name
elif i <= 9:
grid.loc[(
grid['id_range_m'] >= lut[i-1]) &
(grid['id_range_m'] <= lut[i]), 'decile'] = str(i)
value_name = '{}-{}'.format(str(lut[i-1]), str(lut[i]))
grid.loc[(
grid['id_range_m'] >= lut[i-1]) &
(grid['id_range_m'] <= lut[i]), 'value'] = value_name
elif i == 10:
grid.loc[(grid['id_range_m'] > lut[i-1]), 'decile'] = str(i)
value_name = '>{}'.format(str(lut[i-1]))
grid.loc[(grid['id_range_m'] > lut[i-1]), 'value'] = value_name
else:
continue
np.random.seed(2)
grid = grid.loc[grid['area_km2'] > 2400].reset_index()
sampling_areas = grid.groupby(['decile']).apply(lambda x: x.sample(1)).reset_index(drop=True)
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_area')
if not os.path.exists(directory):
os.makedirs(directory)
sampling_areas.to_file(os.path.join(directory, 'sampling_areas.shp'))
sampling_areas.crs = 'epsg:4326'
return sampling_areas
def get_points(iso3, sampling_areas, tile_lookup, point_spacing):
"""
"""
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
if not os.path.exists(directory):
os.makedirs(directory)
sampling_areas = sampling_areas.to_crs("epsg:3857")
for idx, sampling_area in sampling_areas.iterrows():
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
filename = "{}-{}".format(lon, lat)
xmin, ymin, xmax, ymax = sampling_area['geometry'].bounds
polygons = manually_create_grid(xmin, ymin, xmax, ymax, point_spacing, point_spacing)
#make geopandas dataframes
grid_sample = gpd.GeoDataFrame({'geometry': polygons}, crs="epsg:3857")
boundary = gpd.GeoDataFrame({'geometry': sampling_area['geometry']},
crs="epsg:3857", index=[0])
#only get points within the tile boundary
grid_sample = gpd.overlay(grid_sample, boundary, how='intersection')
grid_sample = grid_sample.to_crs("epsg:4326") #convert to lon lat
        ##pick a random sample point within each grid sample tile
        sampling_points = find_points(iso3, grid_sample, tile_lookup, filename)#[:1]
        ##keep in lon-lat (epsg:4326) for the viewshed step
        sampling_points = sampling_points.to_crs("epsg:4326")
path_output = os.path.join(directory, filename + '.shp')
sampling_points.to_file(path_output)
return sampling_points
def find_points(iso3, grid_sample, tile_lookup, filename):
"""
"""
filename_2 = filename + '.shp'
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
path_output = os.path.join(directory, filename_2)
if os.path.exists(path_output):
return gpd.read_file(path_output, crs='epsg:4326')
output = []
for idx, grid_tile in grid_sample.iterrows():
min_x, min_y, max_x, max_y = grid_tile['geometry'].bounds
geom = Point(random.uniform(min_x, max_x), random.uniform(min_y, max_y))
output.append({
'type': 'Feature',
'geometry': geom,
'properties': {
}
})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
return output
def generate_viewsheds(iso3, sampling_areas, sampling_points):
"""
"""
sampling_areas = sampling_areas.to_crs("epsg:3857")
#set output folder
folder_out_viewsheds = os.path.join(DATA_INTERMEDIATE, iso3, 'viewsheds')
if not os.path.exists(folder_out_viewsheds):
os.makedirs(folder_out_viewsheds)
for idx, sampling_area in tqdm(sampling_areas.iterrows(),
total=sampling_areas.shape[0]):
output = []
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
area_filename = "{}-{}".format(lon, lat)
print('--Working on {}'.format(area_filename))
##load sampling points
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'sampling_points')
points = gpd.read_file(os.path.join(directory, area_filename + '.shp'))#[:2]
##convert to lon lat to get correct raster tile
sampling_area_df = gpd.GeoDataFrame({'geometry': sampling_area['geometry']},
crs="epsg:3857", index=[0])
sampling_area_df = sampling_area_df.to_crs("epsg:4326")
for idx, item in sampling_area_df.iterrows():
#needs a loop because the data structure needs a series
path_input = find_tile(item['geometry'].bounds, tile_lookup)
for idx, point in tqdm(points.iterrows(), total=points.shape[0]):
results = []
lon = point['geometry'].representative_point().coords[0][0]
lat = point['geometry'].representative_point().coords[0][1]
filename2 = "{}-{}".format(lon, lat)
path_output = os.path.join(folder_out_viewsheds, filename2)
file_path = os.path.join(path_output, 'location', 'PERMANENT',
'viewsheds', filename2 + '.tif')
x = point['geometry'].coords[0][0]
y = point['geometry'].coords[0][1]
if not os.path.exists(file_path):
try:
viewshed((x, y), path_input, path_output, filename2, 45000, 'epsg:4326')
except:
print('--Viewshed already exists')
seen = set()
for idx, node in tqdm(points.iterrows(), total=points.shape[0]):
x2 = node['geometry'].coords[0][0]
y2 = node['geometry'].coords[0][1]
link = '{}_{}_{}_{}'.format(x, y, x2, y2)
if link in seen:
continue
dist = find_distance((x, y), (x2, y2))
if dist < 10:
continue
los = check_los(file_path, (x2, y2))
results.append({
'sampling_area': area_filename,
'point_id': filename2,
'node_id': '{}_{}'.format(x2, y2),
'distance': dist,
'id_range_m': sampling_area['id_range_m'],
'decile': sampling_area['decile'],
'los': los,
})
seen.add('{}_{}_{}_{}'.format(x, y, x2, y2))
seen.add('{}_{}_{}_{}'.format(x2, y2, x, y))
output = output + results
output = pd.DataFrame(output)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results')
if not os.path.exists(folder):
os.makedirs(folder)
output.to_csv(os.path.join(folder, area_filename + '.csv'), index=False)
def viewshed(point, path_input, path_output, tile_name, max_distance, crs):
"""
Perform a viewshed using GRASS.
Parameters
    ----------
    point : tuple
        The point being queried.
    path_input : string
        File path of the digital elevation raster tile used as the viewshed
        input.
path_output : string
The directory path for the output folder.
tile_name : string
The name allocated to the viewshed tile.
max_distance : int
The maximum distance a path can be.
crs : string
The coordinate reference system in use.
Returns
-------
    None
        The viewshed raster is written as <tile_name>.tif in the 'viewsheds'
        directory of the GRASS location created under path_output.
"""
with Session(gisdb=path_output, location="location", create_opts=crs):
# print('parse command')
# print(gcore.parse_command("g.gisenv", flags="s"))#, set="DEBUG=3"
# print('r.external')
# now link a GDAL supported raster file to a binary raster map layer,
# from any GDAL supported raster map format, with an optional title.
# The file is not imported but just registered as GRASS raster map.
gcore.run_command('r.external', input=path_input, output=tile_name, overwrite=True)
# print('r.external.out')
#write out as geotiff
gcore.run_command('r.external.out', directory='viewsheds', format="GTiff")
# print('r.region')
#manage the settings of the current geographic region
gcore.run_command('g.region', raster=tile_name)
# print('r.viewshed')
#for each point in the output that is NULL: No LOS
gcore.run_command('r.viewshed', #flags='e',
input=tile_name,
output='{}.tif'.format(tile_name),
coordinate= [point[0], point[1]],
observer_elevation=30,
target_elevation=30,
memory=5000,
overwrite=True,
quiet=True,
max_distance=max_distance,
# verbose=True
)
def check_los(path_input, point):
"""
    Determine whether a queried point is within line of sight, based on
    the viewshed raster generated for the transmitting point.
    Parameters
    ----------
    path_input : string
        File path of the viewshed raster tile.
point : tuple
Coordinate point being queried.
Returns
-------
los : string
The Line of Sight (los) of the path queried.
"""
with rasterio.open(path_input) as src:
x = point[0]
y = point[1]
for val in src.sample([(x, y)]):
if np.isnan(val):
# print('is nan: {} therefore nlos'.format(val))
los = 'nlos'
return los
else:
# print('is not nan: {} therefore los'.format(val))
                los = 'clos'
return los
def find_distance(point1, point2):
"""
"""
point1 = Point(point1)
point1 = gpd.GeoDataFrame({'geometry': [point1]}, index=[0])
point1 = point1.set_crs('epsg:4326')
point1 = point1.to_crs('epsg:3857')
point2 = Point(point2)
point2 = gpd.GeoDataFrame({'geometry': [point2]}, index=[0])
point2 = point2.set_crs('epsg:4326')
point2 = point2.to_crs('epsg:3857')
dist = LineString([
(point1['geometry'][0].coords[0][0], point1['geometry'][0].coords[0][1]),
(point2['geometry'][0].coords[0][0], point2['geometry'][0].coords[0][1])
]).length
return dist
def collect_results(iso3, sampling_areas):
"""
"""
sampling_areas = sampling_areas.to_crs("epsg:3857")#[:1]
output = []
#set output folder
for idx, sampling_area in sampling_areas.iterrows():
lon = sampling_area['geometry'].representative_point().coords[0][0]
lat = sampling_area['geometry'].representative_point().coords[0][1]
filename = "{}-{}".format(lon, lat)
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'los_results')
data = pd.read_csv(os.path.join(directory, filename + '.csv'))
seen = set()
interval_size = 2500
for distance_lower in range(0, 45000, interval_size):
distance_upper = distance_lower + interval_size
clos = 0
nlos = 0
for idx, item in data.iterrows():
path_id = '{}_{}_{}'.format(
item['point_id'],
item['node_id'],
item['distance']
)
if not path_id in seen:
if item['distance'] < distance_upper:
if item['los'] == 'clos':
clos += 1
elif item['los'] == 'nlos':
nlos += 1
else:
print('Did not recognize los')
seen.add(path_id)
if clos > 0:
clos_probability = (clos / (clos + nlos))
else:
clos_probability = 'no data'
if nlos > 0:
nlos_probability = (nlos / (clos + nlos))
else:
nlos_probability = 'no data'
output.append({
'decile': item['decile'],
'id_range_m': item['id_range_m'],
'distance_lower': distance_lower,
'distance_upper': distance_upper,
'total_samples': clos + nlos,
'clos_probability': clos_probability,
'nlos_probability': nlos_probability,
})
output = pd.DataFrame(output)
folder = os.path.join(DATA_INTERMEDIATE, iso3)
output.to_csv(os.path.join(folder, 'los_lookup.csv'), index=False)
if __name__ == "__main__":
countries = [
("PER", 5e4, 25e2),
("IDN", 5e4, 25e2),
]
for country in countries:
iso3 = country[0]
side_length = country[1]
point_spacing = country[2]
##Load the raster tile lookup
tile_lookup = load_raster_tile_lookup(iso3)
##Generate grids
generate_grid(iso3, side_length) #1e5
# ##Add interdecile range to grid
grid = add_id_range_data_to_grid(iso3, tile_lookup, side_length)
##Get the terrain deciles
terrain_values = estimate_terrain_deciles(grid)
##Get the grid tile samples
sampling_areas = select_grid_sampling_areas(iso3, grid, terrain_values)#[:1]
##Generate the terrain lookup
sampling_points = get_points(iso3, sampling_areas, tile_lookup, point_spacing)#[:1]
##Process viewsheds
generate_viewsheds(iso3, sampling_areas, sampling_points)
## Collect results
collect_results(iso3, sampling_areas)
|
test_jlock_init_and_delete
|
Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
|
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
# MASKED: test_jlock_init_and_delete function (lines 54-69)
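    # Illustrative sketch (editor's addition): the original masked test is not
    # reproduced here; a minimal version might look like the method below. It
    # assumes ``JLock`` exposes an ``acquired`` flag and that deleting an
    # unacquired instance is safe. Named with a leading underscore so the test
    # runner does not collect this guess as a real test.
    @mock.patch('tempfile.tempdir', new='tmp')
    def _example_init_and_delete(self):
        """Hypothetical sketch of initializing and deleting a ``JLock``."""
        lock = jlock.JLock(0xdeadbeef)
        self.assertFalse(lock.acquired)
        del lock  # should not raise even though the lock was never acquired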
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
| 54 | 69 |
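Note on the row above: the init/delete test only pins down that deleting a ``JLock`` routes through ``release()``. Going by the API these tests exercise — ``JLock(serial_no)``, ``acquire()``, ``release()`` and the ``acquired`` flag — and not by the pylink source itself, a minimal usage sketch might look like the following; the function name and the error message are purely illustrative.

import pylink.jlock as jlock

def run_with_lock(serial_no):
    # Hedged sketch: only the JLock calls below are taken from the tests above.
    lock = jlock.JLock(serial_no)
    if not lock.acquire():
        raise RuntimeError('J-Link %s appears to be in use' % serial_no)
    try:
        pass  # interact with the debug probe here
    finally:
        lock.release()  # __del__ falls back to this, per the test above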
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
test_jlock_acquire_exists
|
Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
|
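The implementation that follows feeds the lockfile contents through ``mock.mock_open`` and a patched ``pylink.jlock.open``. As a standalone refresher on that helper (independent of pylink, Python 3 assumed), ``mock_open(read_data=...)`` builds a file-handle mock whose ``read`` returns the canned text; the path below is arbitrary because ``open`` itself is patched.

from unittest import mock

fake_open = mock.mock_open(read_data='42\n')
with mock.patch('builtins.open', fake_open):
    with open('/tmp/lockfile') as f:  # any path works; open() is mocked
        assert f.read() == '42\n'
fake_open.assert_called_once_with('/tmp/lockfile')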
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
# MASKED: test_jlock_acquire_exists function (lines 71-117)
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
| 71 | 117 |
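Read together, the assertions above (and in the sibling acquire tests further down) pin down roughly the following control flow for acquiring the lock. This is an illustrative reconstruction inferred from the test expectations only — ``try_acquire`` is a hypothetical stand-in, not the actual ``pylink.jlock`` implementation.

import errno
import os

import psutil

def try_acquire(path):
    # Sketch of the behaviour the assertions imply, not pylink's real code.
    if os.path.exists(path):
        try:
            with open(path) as f:
                pid = int(f.readline().strip())
        except IOError:
            pass                   # unreadable lockfile: leave it alone
        except ValueError:
            os.remove(path)        # garbage contents: treat as stale
        else:
            if not psutil.pid_exists(pid):
                os.remove(path)    # owner died: clear the stale file
            # else: live owner -> fall through; O_EXCL below will fail
    try:
        fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return False           # somebody else holds the lock
        raise                      # any other OS error propagates
    os.write(fd, ('%s\n' % os.getpid()).encode())
    return True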
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
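One detail worth calling out before the next row: stacked ``mock.patch`` decorators are applied bottom-up, which is why every test signature above lists its mock arguments in the reverse order of the decorators (``pylink.jlock.open`` is the innermost patch and therefore the first parameter, ``os.close`` the outermost and last). A tiny self-contained illustration using two of the same targets:

from unittest import mock

@mock.patch('os.remove')   # outermost patch -> last mock argument
@mock.patch('os.open')     # innermost patch -> first mock argument
def demo(mock_os_open, mock_os_remove):
    return mock_os_open, mock_os_remove

inner, outer = demo()
assert inner is not outer  # two distinct mocks, bound bottom-up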
test_jlock_acquire_os_error
|
Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
|
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
# MASKED: test_jlock_acquire_os_error function (lines 119-162)
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
| 119 | 162 |
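A small point about the error value used above: ``~errno.EEXIST`` is simply a cheap way to produce an errno that is guaranteed not to equal ``errno.EEXIST`` (the bitwise complement of a non-negative int is always a different, negative number), so the test exercises the "re-raise anything that is not EEXIST" branch.

import errno

assert ~errno.EEXIST != errno.EEXIST
err = OSError(~errno.EEXIST, 'Message')
assert err.errno != errno.EEXIST   # so acquire() is expected to re-raise it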
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
test_jlock_acquire_bad_file
|
Tests acquiring the lockfile when the current lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
|
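In the implementation below, ``mock_open.side_effect = [IOError()]`` makes the very first call to the patched ``open`` raise instead of returning a handle. That is standard ``mock`` behaviour — exception instances in a ``side_effect`` iterable are raised, other values are returned — as this standalone snippet shows (names are illustrative):

from unittest import mock

opener = mock.Mock(side_effect=[IOError('unreadable'), 'handle'])
try:
    opener('lockfile')                    # first item is an exception -> raised
except IOError:
    pass
assert opener('lockfile') == 'handle'     # next item is returned normally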
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
# MASKED: test_jlock_acquire_bad_file function (lines 164-211)
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
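# Note on the patch ordering used throughout this suite: stacked @mock.patch
# decorators are applied bottom-up, so the bottom-most patch
# ('pylink.jlock.open') is delivered as the first mock argument (mock_open)
# and the top-most one ('os.close') arrives last (mock_close). The parameter
# lists therefore read in the reverse order of the decorator stacks.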
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
| 164 | 211 |
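# A minimal sketch of the acquire() flow that this and the neighbouring acquire
# tests exercise, inferred from their mock assertions (stale-PID cleanup first,
# then an exclusive os.open); it is not copied from pylink.jlock, so the helper
# name, flags, and error handling are assumptions.
import errno
import os
import psutil

def acquire_sketch(path):
    """Hypothetical lockfile acquisition mirroring the mocked call pattern.

    Returns the open descriptor on success, or None when the lock is held by a
    live process (the real method presumably stores the fd and returns a bool).
    """
    if os.path.exists(path):
        try:
            with open(path, 'r') as f:
                pid = int(f.readline().strip())
            if not psutil.pid_exists(pid):
                os.remove(path)  # lockfile left behind by a dead process
        except ValueError:
            os.remove(path)      # unparsable PID is treated as stale
        except IOError:
            pass                 # unreadable lockfile: let os.open decide
    try:
        fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return None          # another live process holds the lock
        raise
    os.write(fd, ('%d\n' % os.getpid()).encode())
    return fd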
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
test_jlock_acquire_invalid_pid
|
Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
|
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
# MASKED: test_jlock_acquire_invalid_pid function (lines 213-258)
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
| 213 | 258 |
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
test_jlock_acquire_old_pid
|
Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
|
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
# MASKED: test_jlock_acquire_old_pid function (lines 260-306)
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
| 260 | 306 |
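# Side note on the read pattern shared by these tests: mock.mock_open(read_data=...)
# builds a file-handle mock whose readline() yields the given text, and assigning a
# one-element list to the patched open's side_effect hands that handle back on the
# first call. A standalone illustration using plain unittest.mock (the fake_open
# name and the 'tmp/lockfile' path are made up for the example):
import unittest.mock as mock

fake_open = mock.Mock(side_effect=[mock.mock_open(read_data='42\n').return_value])
with fake_open('tmp/lockfile') as handle:  # first call returns the prepared handle
    assert handle.readline() == '42\n'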
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
test_jlock_release_acquired
|
Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
|
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
# MASKED: test_jlock_release_acquired function (lines 308-336)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
| 308 | 336 |
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.jlock as jlock
import mock
import errno
import os
import unittest
class TestJLock(unittest.TestCase):
"""Tests the ``jlock`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
pass
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
"""Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
serial_no = 0xdeadbeef
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='%s\n' % pid).return_value,
]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
serial_no = 0xdeadbeef
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError(~errno.EEXIST, 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the current lockfile is invallid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
pid = 42
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
IOError()
]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='dog\n').return_value,
]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
"""Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None``
"""
fd = 1
serial_no = 0xdeadbeef
mock_open.side_effect = [
mock.mock_open(read_data='42\n').return_value,
]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once()
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
"""Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired)
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
"""Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None``
"""
lock = jlock.JLock(0xdeadbeef)
self.assertFalse(lock.release())
if __name__ == '__main__':
unittest.main()
|
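The release tests above pin down the observable behaviour of ``JLock.release``: close the stored file descriptor, remove the lockfile if it still exists, clear the ``acquired`` flag, and report ``False`` when the lock was never held. A minimal sketch of a method with that behaviour (a hypothetical reconstruction for illustration only, not the actual pylink implementation) could look like this:
import os
def release(self):
    # Hypothetical reconstruction based on the assertions in
    # test_jlock_release_acquired / test_jlock_release_not_held.
    if not self.acquired:
        # Releasing a lock that is not held reports failure.
        return False
    os.close(self.fd)              # matches mock_close.assert_called_once_with(1)
    if os.path.exists(self.path):  # matches mock_exists.assert_called_once_with(os.sep)
        os.remove(self.path)       # matches mock_remove.assert_called_once_with(os.sep)
    self.acquired = False
    return True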
distance_p2p
|
Computes minimal distances of each point in points_src to points_tgt.
Args:
points_src (numpy array): source points
normals_src (numpy array): source normals
points_tgt (numpy array): target points
normals_tgt (numpy array): target normals
|
import logging
import numpy as np
import trimesh
from src.common import compute_iou
# from scipy.spatial import cKDTree
from src.utils.libkdtree import KDTree
from src.utils.libmesh import check_mesh_contains
# Maximum values for bounding box [-0.5, 0.5]^3
EMPTY_PCL_DICT = {
'completeness': np.sqrt(3),
'accuracy': np.sqrt(3),
'completeness2': 3,
'accuracy2': 3,
'chamfer': 6,
}
EMPTY_PCL_DICT_NORMALS = {
'normals completeness': -1.,
'normals accuracy': -1.,
'normals': -1.,
}
logger = logging.getLogger(__name__)
class MeshEvaluator(object):
""" Mesh evaluation class.
It handles the mesh evaluation process.
Args:
n_points (int): number of points to be used for evaluation
"""
def __init__(self, n_points=100000):
self.n_points = n_points
def eval_mesh(self,
mesh,
pointcloud_tgt,
normals_tgt,
points_iou,
occ_tgt,
remove_wall=False):
""" Evaluates a mesh.
Args:
mesh (trimesh): mesh which should be evaluated
pointcloud_tgt (numpy array): target point cloud
normals_tgt (numpy array): target normals
points_iou (numpy_array): points tensor for IoU evaluation
occ_tgt (numpy_array): GT occupancy values for IoU points
remove_wall (bool): if True, discard sampled points that fall on walls or the floor (outside the target point cloud's bounding box)
"""
if len(mesh.vertices) != 0 and len(mesh.faces) != 0:
if remove_wall: # ! Remove walls and floors
pointcloud, idx = mesh.sample(2 * self.n_points, return_index=True)
eps = 0.007
x_max, x_min = pointcloud_tgt[:, 0].max(), pointcloud_tgt[:, 0].min()
y_max, y_min = pointcloud_tgt[:, 1].max(), pointcloud_tgt[:, 1].min()
z_max, z_min = pointcloud_tgt[:, 2].max(), pointcloud_tgt[:, 2].min()
# add small offsets
x_max, x_min = x_max + eps, x_min - eps
y_max, y_min = y_max + eps, y_min - eps
z_max, z_min = z_max + eps, z_min - eps
mask_x = (pointcloud[:, 0] <= x_max) & (pointcloud[:, 0] >= x_min)
mask_y = (pointcloud[:, 1] >= y_min) # floor
mask_z = (pointcloud[:, 2] <= z_max) & (pointcloud[:, 2] >= z_min)
mask = mask_x & mask_y & mask_z
pointcloud_new = pointcloud[mask]
# Subsample
idx_new = np.random.randint(pointcloud_new.shape[0], size=self.n_points)
pointcloud = pointcloud_new[idx_new]
idx = idx[mask][idx_new]
else:
pointcloud, idx = mesh.sample(self.n_points, return_index=True)
pointcloud = pointcloud.astype(np.float32)
normals = mesh.face_normals[idx]
else:
pointcloud = np.empty((0, 3))
normals = np.empty((0, 3))
out_dict = self.eval_pointcloud(pointcloud, pointcloud_tgt, normals, normals_tgt)
if len(mesh.vertices) != 0 and len(mesh.faces) != 0:
occ = check_mesh_contains(mesh, points_iou)
if occ_tgt.min() < 0:
occ_tgt = (occ_tgt <= 0).astype(np.float32)
out_dict['iou'] = compute_iou(occ, occ_tgt)
else:
out_dict['iou'] = 0.
return out_dict
@staticmethod
def eval_pointcloud(pointcloud,
pointcloud_tgt,
normals=None,
normals_tgt=None,
thresholds=np.linspace(1. / 1000, 1, 1000)):
""" Evaluates a point cloud.
Args:
pointcloud (numpy array): predicted point cloud
pointcloud_tgt (numpy array): target point cloud
normals (numpy array): predicted normals
normals_tgt (numpy array): target normals
thresholds (numpy array): threshold values for the F-score calculation
"""
# Return maximum losses if pointcloud is empty
if pointcloud.shape[0] == 0:
logger.warning('Empty pointcloud / mesh detected!')
out_dict = EMPTY_PCL_DICT.copy()
if normals is not None and normals_tgt is not None:
out_dict.update(EMPTY_PCL_DICT_NORMALS)
return out_dict
pointcloud = np.asarray(pointcloud)
pointcloud_tgt = np.asarray(pointcloud_tgt)
# Completeness: how far are the points of the target point cloud from the predicted point cloud
completeness, completeness_normals = distance_p2p(pointcloud_tgt, normals_tgt, pointcloud, normals)
recall = get_threshold_percentage(completeness, thresholds)
completeness2 = completeness ** 2
completeness = completeness.mean()
completeness2 = completeness2.mean()
completeness_normals = completeness_normals.mean()
# Accuracy: how far are the points of the predicted pointcloud from the target pointcloud
accuracy, accuracy_normals = distance_p2p(pointcloud, normals, pointcloud_tgt, normals_tgt)
precision = get_threshold_percentage(accuracy, thresholds)
accuracy2 = accuracy ** 2
accuracy = accuracy.mean()
accuracy2 = accuracy2.mean()
accuracy_normals = accuracy_normals.mean()
# Chamfer distance
chamferL2 = 0.5 * (completeness2 + accuracy2)
normals_correctness = (0.5 * completeness_normals + 0.5 * accuracy_normals)
chamferL1 = 0.5 * (completeness + accuracy)
# F-Score
F = [2 * precision[i] * recall[i] / (precision[i] + recall[i]) for i in range(len(precision))]
out_dict = {
'completeness': completeness,
'accuracy': accuracy,
'normals completeness': completeness_normals,
'normals accuracy': accuracy_normals,
'normals': normals_correctness,
'completeness2': completeness2,
'accuracy2': accuracy2,
'chamfer-L2': chamferL2,
'chamfer-L1': chamferL1,
'f-score': F[9], # threshold = 1.0%
'f-score-15': F[14], # threshold = 1.5%
'f-score-20': F[19], # threshold = 2.0%
}
return out_dict
# MASKED: distance_p2p function (lines 173-194)
def distance_p2m(points, mesh):
""" Compute minimal distances of each point in points to mesh.
Args:
points (numpy array): points array
mesh (trimesh): mesh
"""
_, dist, _ = trimesh.proximity.closest_point(mesh, points)
return dist
def get_threshold_percentage(dist, thresholds):
""" Evaluates a point cloud.
Args:
dist (numpy array): calculated distance
thresholds (numpy array): threshold values for the F-score calculation
"""
in_threshold = [(dist <= t).mean() for t in thresholds]
return in_threshold
|
def distance_p2p(points_src, normals_src, points_tgt, normals_tgt):
""" Computes minimal distances of each point in points_src to points_tgt.
Args:
points_src (numpy array): source points
normals_src (numpy array): source normals
points_tgt (numpy array): target points
normals_tgt (numpy array): target normals
"""
kdtree = KDTree(points_tgt)
dist, idx = kdtree.query(points_src)
if normals_src is not None and normals_tgt is not None:
normals_src = normals_src / np.linalg.norm(normals_src, axis=-1, keepdims=True)
normals_tgt = normals_tgt / np.linalg.norm(normals_tgt, axis=-1, keepdims=True)
normals_dot_product = (normals_tgt[idx] * normals_src).sum(axis=-1)
# Handle normals that point in the wrong direction gracefully (mostly because the generation method does not enforce a consistent orientation)
normals_dot_product = np.abs(normals_dot_product)
else:
normals_dot_product = np.array([np.nan] * points_src.shape[0], dtype=np.float32)
return dist, normals_dot_product
| 173 | 194 |
import logging
import numpy as np
import trimesh
from src.common import compute_iou
# from scipy.spatial import cKDTree
from src.utils.libkdtree import KDTree
from src.utils.libmesh import check_mesh_contains
# Maximum values for bounding box [-0.5, 0.5]^3
EMPTY_PCL_DICT = {
'completeness': np.sqrt(3),
'accuracy': np.sqrt(3),
'completeness2': 3,
'accuracy2': 3,
'chamfer': 6,
}
EMPTY_PCL_DICT_NORMALS = {
'normals completeness': -1.,
'normals accuracy': -1.,
'normals': -1.,
}
logger = logging.getLogger(__name__)
class MeshEvaluator(object):
""" Mesh evaluation class.
It handles the mesh evaluation process.
Args:
n_points (int): number of points to be used for evaluation
"""
def __init__(self, n_points=100000):
self.n_points = n_points
def eval_mesh(self,
mesh,
pointcloud_tgt,
normals_tgt,
points_iou,
occ_tgt,
remove_wall=False):
""" Evaluates a mesh.
Args:
mesh (trimesh): mesh which should be evaluated
pointcloud_tgt (numpy array): target point cloud
normals_tgt (numpy array): target normals
points_iou (numpy_array): points tensor for IoU evaluation
occ_tgt (numpy_array): GT occupancy values for IoU points
remove_wall (bool): if True, discard sampled points that fall on walls or the floor (outside the target point cloud's bounding box)
"""
if len(mesh.vertices) != 0 and len(mesh.faces) != 0:
if remove_wall: # ! Remove walls and floors
pointcloud, idx = mesh.sample(2 * self.n_points, return_index=True)
eps = 0.007
x_max, x_min = pointcloud_tgt[:, 0].max(), pointcloud_tgt[:, 0].min()
y_max, y_min = pointcloud_tgt[:, 1].max(), pointcloud_tgt[:, 1].min()
z_max, z_min = pointcloud_tgt[:, 2].max(), pointcloud_tgt[:, 2].min()
# add small offsets
x_max, x_min = x_max + eps, x_min - eps
y_max, y_min = y_max + eps, y_min - eps
z_max, z_min = z_max + eps, z_min - eps
mask_x = (pointcloud[:, 0] <= x_max) & (pointcloud[:, 0] >= x_min)
mask_y = (pointcloud[:, 1] >= y_min) # floor
mask_z = (pointcloud[:, 2] <= z_max) & (pointcloud[:, 2] >= z_min)
mask = mask_x & mask_y & mask_z
pointcloud_new = pointcloud[mask]
# Subsample
idx_new = np.random.randint(pointcloud_new.shape[0], size=self.n_points)
pointcloud = pointcloud_new[idx_new]
idx = idx[mask][idx_new]
else:
pointcloud, idx = mesh.sample(self.n_points, return_index=True)
pointcloud = pointcloud.astype(np.float32)
normals = mesh.face_normals[idx]
else:
pointcloud = np.empty((0, 3))
normals = np.empty((0, 3))
out_dict = self.eval_pointcloud(pointcloud, pointcloud_tgt, normals, normals_tgt)
if len(mesh.vertices) != 0 and len(mesh.faces) != 0:
occ = check_mesh_contains(mesh, points_iou)
if occ_tgt.min() < 0:
occ_tgt = (occ_tgt <= 0).astype(np.float32)
out_dict['iou'] = compute_iou(occ, occ_tgt)
else:
out_dict['iou'] = 0.
return out_dict
@staticmethod
def eval_pointcloud(pointcloud,
pointcloud_tgt,
normals=None,
normals_tgt=None,
thresholds=np.linspace(1. / 1000, 1, 1000)):
""" Evaluates a point cloud.
Args:
pointcloud (numpy array): predicted point cloud
pointcloud_tgt (numpy array): target point cloud
normals (numpy array): predicted normals
normals_tgt (numpy array): target normals
thresholds (numpy array): threshold values for the F-score calculation
"""
# Return maximum losses if pointcloud is empty
if pointcloud.shape[0] == 0:
logger.warning('Empty pointcloud / mesh detected!')
out_dict = EMPTY_PCL_DICT.copy()
if normals is not None and normals_tgt is not None:
out_dict.update(EMPTY_PCL_DICT_NORMALS)
return out_dict
pointcloud = np.asarray(pointcloud)
pointcloud_tgt = np.asarray(pointcloud_tgt)
# Completeness: how far are the points of the target point cloud from the predicted point cloud
completeness, completeness_normals = distance_p2p(pointcloud_tgt, normals_tgt, pointcloud, normals)
recall = get_threshold_percentage(completeness, thresholds)
completeness2 = completeness ** 2
completeness = completeness.mean()
completeness2 = completeness2.mean()
completeness_normals = completeness_normals.mean()
# Accuracy: how far are the points of the predicted pointcloud from the target pointcloud
accuracy, accuracy_normals = distance_p2p(pointcloud, normals, pointcloud_tgt, normals_tgt)
precision = get_threshold_percentage(accuracy, thresholds)
accuracy2 = accuracy ** 2
accuracy = accuracy.mean()
accuracy2 = accuracy2.mean()
accuracy_normals = accuracy_normals.mean()
# Chamfer distance
chamferL2 = 0.5 * (completeness2 + accuracy2)
normals_correctness = (0.5 * completeness_normals + 0.5 * accuracy_normals)
chamferL1 = 0.5 * (completeness + accuracy)
# F-Score
F = [2 * precision[i] * recall[i] / (precision[i] + recall[i]) for i in range(len(precision))]
out_dict = {
'completeness': completeness,
'accuracy': accuracy,
'normals completeness': completeness_normals,
'normals accuracy': accuracy_normals,
'normals': normals_correctness,
'completeness2': completeness2,
'accuracy2': accuracy2,
'chamfer-L2': chamferL2,
'chamfer-L1': chamferL1,
'f-score': F[9], # threshold = 1.0%
'f-score-15': F[14], # threshold = 1.5%
'f-score-20': F[19], # threshold = 2.0%
}
return out_dict
def distance_p2p(points_src, normals_src, points_tgt, normals_tgt):
""" Computes minimal distances of each point in points_src to points_tgt.
Args:
points_src (numpy array): source points
normals_src (numpy array): source normals
points_tgt (numpy array): target points
normals_tgt (numpy array): target normals
"""
kdtree = KDTree(points_tgt)
dist, idx = kdtree.query(points_src)
if normals_src is not None and normals_tgt is not None:
normals_src = normals_src / np.linalg.norm(normals_src, axis=-1, keepdims=True)
normals_tgt = normals_tgt / np.linalg.norm(normals_tgt, axis=-1, keepdims=True)
normals_dot_product = (normals_tgt[idx] * normals_src).sum(axis=-1)
# Handle normals that point in the wrong direction gracefully (mostly because the generation method does not enforce a consistent orientation)
normals_dot_product = np.abs(normals_dot_product)
else:
normals_dot_product = np.array([np.nan] * points_src.shape[0], dtype=np.float32)
return dist, normals_dot_product
def distance_p2m(points, mesh):
""" Compute minimal distances of each point in points to mesh.
Args:
points (numpy array): points array
mesh (trimesh): mesh
"""
_, dist, _ = trimesh.proximity.closest_point(mesh, points)
return dist
def get_threshold_percentage(dist, thresholds):
""" Evaluates a point cloud.
Args:
dist (numpy array): calculated distance
thresholds (numpy array): threshold values for the F-score calculation
"""
in_threshold = [(dist <= t).mean() for t in thresholds]
return in_threshold
|
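The metrics above reduce to two nearest-neighbour queries plus a few means and threshold counts, so the pipeline is easy to sanity-check on synthetic data. The sketch below mirrors ``distance_p2p``, the Chamfer-L1 computation, and the 1% F-score from ``eval_pointcloud``, using ``scipy.spatial.cKDTree`` as a stand-in for the project's ``src.utils.libkdtree.KDTree`` wrapper (an assumption made for portability; normals are omitted):
import numpy as np
from scipy.spatial import cKDTree  # stand-in for src.utils.libkdtree.KDTree
def nn_distances(points_src, points_tgt):
    # Distance from each source point to its nearest neighbour in the target
    # cloud, mirroring the KDTree query inside distance_p2p (normals omitted).
    dist, _ = cKDTree(points_tgt).query(points_src)
    return dist
rng = np.random.default_rng(0)
pred = rng.uniform(-0.5, 0.5, size=(2000, 3))  # stand-in predicted samples
gt = rng.uniform(-0.5, 0.5, size=(2000, 3))    # stand-in ground-truth samples
completeness = nn_distances(gt, pred)  # target -> prediction
accuracy = nn_distances(pred, gt)      # prediction -> target
chamfer_l1 = 0.5 * (completeness.mean() + accuracy.mean())
# F-score at the 1% threshold: thresholds[9] == 0.01 in eval_pointcloud.
tau = 0.01
recall = (completeness <= tau).mean()
precision = (accuracy <= tau).mean()
f_score = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0
print('chamfer-L1:', chamfer_l1, 'f-score@1%:', f_score)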
add_descriptor
|
Add a descriptor to this index.
Adding the same descriptor multiple times should not add multiple
copies of the descriptor in the index.
:param descriptor: Descriptor to index.
:type descriptor: smqtk.representation.DescriptorElement
:param no_cache: Do not cache the internal table if a file cache was
provided. This would be used if adding many descriptors at a time,
preventing a file write for every individual descriptor added.
:type no_cache: bool
|
import six
from smqtk.representation import DescriptorIndex, get_data_element_impls
from smqtk.utils import merge_dict, plugin, SimpleTimer
try:
from six.moves import cPickle as pickle
except ImportError:
import pickle
class MemoryDescriptorIndex (DescriptorIndex):
"""
In-memory descriptor index with file caching.
Stored descriptor elements are all held in memory in a uuid-to-element
dictionary (hash table).
If the path to a file cache is provided, it is loaded at construction if it
exists. When elements are added to the index, the in-memory table is dumped
to the cache.
"""
@classmethod
def is_usable(cls):
"""
Check whether this class is available for use.
:return: Boolean determination of whether this implementation is usable.
:rtype: bool
"""
# no dependencies
return True
@classmethod
def get_default_config(cls):
"""
Generate and return a default configuration dictionary for this class.
This will be primarily used for generating what the configuration
dictionary would look like for this class without instantiating it.
By default, we observe what this class's constructor takes as arguments,
turning those argument names into configuration dictionary keys. If any
of those arguments have defaults, we will add those values into the
configuration dictionary appropriately. The dictionary returned should
only contain JSON compliant value types.
It is not guaranteed that the configuration dictionary returned
from this method is valid for construction of an instance of this class.
:return: Default configuration dictionary for the class.
:rtype: dict
"""
c = super(MemoryDescriptorIndex, cls).get_default_config()
c['cache_element'] = plugin.make_config(get_data_element_impls())
return c
@classmethod
def from_config(cls, config_dict, merge_default=True):
"""
Instantiate a new instance of this class given the configuration
JSON-compliant dictionary encapsulating initialization arguments.
:param config_dict: JSON compliant dictionary encapsulating
a configuration.
:type config_dict: dict
:param merge_default: Merge the given configuration on top of the
default provided by ``get_default_config``.
:type merge_default: bool
:return: Constructed instance from the provided config.
:rtype: MemoryDescriptorIndex
"""
if merge_default:
config_dict = merge_dict(cls.get_default_config(), config_dict)
# Optionally construct cache element from sub-config.
if config_dict['cache_element'] \
and config_dict['cache_element']['type']:
e = plugin.from_plugin_config(config_dict['cache_element'],
get_data_element_impls())
config_dict['cache_element'] = e
else:
config_dict['cache_element'] = None
return super(MemoryDescriptorIndex, cls).from_config(config_dict, False)
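# Illustration (not part of the original source; the exact sub-config layout is
# an assumption about smqtk's plugin-config conventions): a JSON configuration
# that selects a file-backed cache element might look like
#   {
#     "cache_element": {
#       "type": "DataFileElement",
#       "DataFileElement": {"filepath": "descriptor_table.pickle"}
#     },
#     "pickle_protocol": -1
#   }
# from_config() converts the "cache_element" sub-config into a DataElement
# instance (or None) before deferring to the parent class's from_config().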
def __init__(self, cache_element=None, pickle_protocol=-1):
"""
Initialize a new in-memory descriptor index, or reload one from a
cache.
:param cache_element: Optional data element cache, loading an existing
index if the element has bytes. If the given element is writable,
new descriptors added to this index are cached to the element.
:type cache_element: None | smqtk.representation.DataElement
:param pickle_protocol: Pickling protocol to use when serializing index
table to the optionally provided, writable cache element. We will
use -1 by default (latest version, probably a binary form).
:type pickle_protocol: int
"""
super(MemoryDescriptorIndex, self).__init__()
# Mapping of descriptor UUID to the DescriptorElement instance.
#: :type: dict[collections.Hashable, smqtk.representation.DescriptorElement]
self._table = {}
# Record of optional file cache we're using
self.cache_element = cache_element
self.pickle_protocol = pickle_protocol
if cache_element and not cache_element.is_empty():
self._log.debug("Loading cached descriptor index table from %s "
"element.", cache_element.__class__.__name__)
self._table = pickle.loads(cache_element.get_bytes())
def get_config(self):
c = merge_dict(self.get_default_config(), {
"pickle_protocol": self.pickle_protocol,
})
if self.cache_element:
merge_dict(c['cache_element'],
plugin.to_plugin_config(self.cache_element))
return c
def cache_table(self):
if self.cache_element and self.cache_element.writable():
with SimpleTimer("Caching descriptor table", self._log.debug):
self.cache_element.set_bytes(pickle.dumps(self._table,
self.pickle_protocol))
def count(self):
return len(self._table)
def clear(self):
"""
Clear this descriptor index's entries.
"""
self._table = {}
self.cache_table()
def has_descriptor(self, uuid):
"""
Check if a DescriptorElement with the given UUID exists in this index.
:param uuid: UUID to query for
:type uuid: collections.Hashable
:return: True if a DescriptorElement with the given UUID exists in this
index, or False if not.
:rtype: bool
"""
return uuid in self._table
# MASKED: add_descriptor function (lines 161-179)
def add_many_descriptors(self, descriptors):
"""
Add multiple descriptors at one time.
:param descriptors: Iterable of descriptor instances to add to this
index.
:type descriptors:
collections.Iterable[smqtk.representation.DescriptorElement]
"""
added_something = False
for d in descriptors:
# using no-cache so we don't trigger multiple file writes
self.add_descriptor(d, no_cache=True)
added_something = True
if added_something:
self.cache_table()
def get_descriptor(self, uuid):
"""
Get the descriptor in this index that is associated with the given UUID.
:param uuid: UUID of the DescriptorElement to get.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:return: DescriptorElement associated with the queried UUID.
:rtype: smqtk.representation.DescriptorElement
"""
return self._table[uuid]
def get_many_descriptors(self, uuids):
"""
Get an iterator over descriptors associated to given descriptor UUIDs.
:param uuids: Iterable of descriptor UUIDs to query for.
:type uuids: collections.Iterable[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
:return: Iterator of descriptors associated to given uuid values.
:rtype: __generator[smqtk.representation.DescriptorElement]
"""
for uid in uuids:
yield self._table[uid]
def remove_descriptor(self, uuid, no_cache=False):
"""
Remove a descriptor from this index by the given UUID.
:param uuid: UUID of the DescriptorElement to remove.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:param no_cache: Do not cache the internal table if a file cache was
provided. This would be used if adding many descriptors at a time,
preventing a file write for every individual descriptor added.
:type no_cache: bool
"""
del self._table[uuid]
if not no_cache:
self.cache_table()
def remove_many_descriptors(self, uuids):
"""
Remove descriptors associated to given descriptor UUIDs from this
index.
:param uuids: Iterable of descriptor UUIDs to remove.
:type uuids: collections.Iterable[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
"""
for uid in uuids:
# using no-cache so we don't trigger multiple file writes
self.remove_descriptor(uid, no_cache=True)
self.cache_table()
def iterkeys(self):
return six.iterkeys(self._table)
def iterdescriptors(self):
return six.itervalues(self._table)
def iteritems(self):
return six.iteritems(self._table)
DESCRIPTOR_INDEX_CLASS = MemoryDescriptorIndex
|
def add_descriptor(self, descriptor, no_cache=False):
"""
Add a descriptor to this index.
Adding the same descriptor multiple times should not add multiple
copies of the descriptor in the index.
:param descriptor: Descriptor to index.
:type descriptor: smqtk.representation.DescriptorElement
:param no_cache: Do not cache the internal table if a file cache was
provided. This would be used if adding many descriptors at a time,
preventing a file write for every individual descriptor added.
:type no_cache: bool
"""
self._table[descriptor.uuid()] = descriptor
if not no_cache:
self.cache_table()
| 161 | 179 |
import six
from smqtk.representation import DescriptorIndex, get_data_element_impls
from smqtk.utils import merge_dict, plugin, SimpleTimer
try:
from six.moves import cPickle as pickle
except ImportError:
import pickle
class MemoryDescriptorIndex (DescriptorIndex):
"""
In-memory descriptor index with file caching.
Stored descriptor elements are all held in memory in a uuid-to-element
dictionary (hash table).
If the path to a file cache is provided, it is loaded at construction if it
exists. When elements are added to the index, the in-memory table is dumped
to the cache.
"""
@classmethod
def is_usable(cls):
"""
Check whether this class is available for use.
:return: Boolean determination of whether this implementation is usable.
:rtype: bool
"""
# no dependencies
return True
@classmethod
def get_default_config(cls):
"""
Generate and return a default configuration dictionary for this class.
This will be primarily used for generating what the configuration
dictionary would look like for this class without instantiating it.
By default, we observe what this class's constructor takes as arguments,
turning those argument names into configuration dictionary keys. If any
of those arguments have defaults, we will add those values into the
configuration dictionary appropriately. The dictionary returned should
only contain JSON compliant value types.
It is not guaranteed that the configuration dictionary returned
from this method is valid for construction of an instance of this class.
:return: Default configuration dictionary for the class.
:rtype: dict
"""
c = super(MemoryDescriptorIndex, cls).get_default_config()
c['cache_element'] = plugin.make_config(get_data_element_impls())
return c
@classmethod
def from_config(cls, config_dict, merge_default=True):
"""
Instantiate a new instance of this class given the configuration
JSON-compliant dictionary encapsulating initialization arguments.
:param config_dict: JSON compliant dictionary encapsulating
a configuration.
:type config_dict: dict
:param merge_default: Merge the given configuration on top of the
default provided by ``get_default_config``.
:type merge_default: bool
:return: Constructed instance from the provided config.
:rtype: MemoryDescriptorIndex
"""
if merge_default:
config_dict = merge_dict(cls.get_default_config(), config_dict)
# Optionally construct cache element from sub-config.
if config_dict['cache_element'] \
and config_dict['cache_element']['type']:
e = plugin.from_plugin_config(config_dict['cache_element'],
get_data_element_impls())
config_dict['cache_element'] = e
else:
config_dict['cache_element'] = None
return super(MemoryDescriptorIndex, cls).from_config(config_dict, False)
def __init__(self, cache_element=None, pickle_protocol=-1):
"""
Initialize a new in-memory descriptor index, or reload one from a
cache.
:param cache_element: Optional data element cache, loading an existing
index if the element has bytes. If the given element is writable,
new descriptors added to this index are cached to the element.
:type cache_element: None | smqtk.representation.DataElement
:param pickle_protocol: Pickling protocol to use when serializing index
table to the optionally provided, writable cache element. We will
use -1 by default (latest version, probably a binary form).
:type pickle_protocol: int
"""
super(MemoryDescriptorIndex, self).__init__()
# Mapping of descriptor UUID to the DescriptorElement instance.
#: :type: dict[collections.Hashable, smqtk.representation.DescriptorElement]
self._table = {}
# Record of optional file cache we're using
self.cache_element = cache_element
self.pickle_protocol = pickle_protocol
if cache_element and not cache_element.is_empty():
self._log.debug("Loading cached descriptor index table from %s "
"element.", cache_element.__class__.__name__)
self._table = pickle.loads(cache_element.get_bytes())
def get_config(self):
c = merge_dict(self.get_default_config(), {
"pickle_protocol": self.pickle_protocol,
})
if self.cache_element:
merge_dict(c['cache_element'],
plugin.to_plugin_config(self.cache_element))
return c
def cache_table(self):
if self.cache_element and self.cache_element.writable():
with SimpleTimer("Caching descriptor table", self._log.debug):
self.cache_element.set_bytes(pickle.dumps(self._table,
self.pickle_protocol))
def count(self):
return len(self._table)
def clear(self):
"""
Clear this descriptor index's entries.
"""
self._table = {}
self.cache_table()
def has_descriptor(self, uuid):
"""
Check if a DescriptorElement with the given UUID exists in this index.
:param uuid: UUID to query for
:type uuid: collections.Hashable
:return: True if a DescriptorElement with the given UUID exists in this
index, or False if not.
:rtype: bool
"""
return uuid in self._table
def add_descriptor(self, descriptor, no_cache=False):
"""
Add a descriptor to this index.
Adding the same descriptor multiple times should not add multiple
copies of the descriptor in the index.
:param descriptor: Descriptor to index.
:type descriptor: smqtk.representation.DescriptorElement
:param no_cache: Do not cache the internal table if a file cache was
provided. This would be used if adding many descriptors at a time,
preventing a file write for every individual descriptor added.
:type no_cache: bool
"""
self._table[descriptor.uuid()] = descriptor
if not no_cache:
self.cache_table()
def add_many_descriptors(self, descriptors):
"""
Add multiple descriptors at one time.
:param descriptors: Iterable of descriptor instances to add to this
index.
:type descriptors:
collections.Iterable[smqtk.representation.DescriptorElement]
"""
added_something = False
for d in descriptors:
# using no-cache so we don't trigger multiple file writes
self.add_descriptor(d, no_cache=True)
added_something = True
if added_something:
self.cache_table()
def get_descriptor(self, uuid):
"""
Get the descriptor in this index that is associated with the given UUID.
:param uuid: UUID of the DescriptorElement to get.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:return: DescriptorElement associated with the queried UUID.
:rtype: smqtk.representation.DescriptorElement
"""
return self._table[uuid]
def get_many_descriptors(self, uuids):
"""
Get an iterator over descriptors associated to given descriptor UUIDs.
:param uuids: Iterable of descriptor UUIDs to query for.
:type uuids: collections.Iterable[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
:return: Iterator of descriptors associated to given uuid values.
:rtype: __generator[smqtk.representation.DescriptorElement]
"""
for uid in uuids:
yield self._table[uid]
def remove_descriptor(self, uuid, no_cache=False):
"""
Remove a descriptor from this index by the given UUID.
:param uuid: UUID of the DescriptorElement to remove.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:param no_cache: Do not cache the internal table if a file cache was
provided. This would be used if adding many descriptors at a time,
preventing a file write for every individual descriptor added.
:type no_cache: bool
"""
del self._table[uuid]
if not no_cache:
self.cache_table()
def remove_many_descriptors(self, uuids):
"""
Remove descriptors associated to given descriptor UUIDs from this
index.
:param uuids: Iterable of descriptor UUIDs to remove.
:type uuids: collections.Iterable[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
"""
for uid in uuids:
# using no-cache so we don't trigger multiple file writes
self.remove_descriptor(uid, no_cache=True)
self.cache_table()
def iterkeys(self):
return six.iterkeys(self._table)
def iterdescriptors(self):
return six.itervalues(self._table)
def iteritems(self):
return six.iteritems(self._table)
DESCRIPTOR_INDEX_CLASS = MemoryDescriptorIndex
|
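The ``no_cache`` flag in ``add_descriptor`` / ``remove_descriptor`` exists so that the batch methods can defer the pickle dump and write the cache once per batch instead of once per element. A self-contained toy version of that pattern (illustrative only, not smqtk's actual classes) shows the effect:
class TinyIndex:
    """Toy index illustrating the deferred-cache-write pattern."""
    def __init__(self):
        self._table = {}
        self.cache_writes = 0   # stands in for pickling the table to disk
    def cache_table(self):
        self.cache_writes += 1
    def add(self, uid, value, no_cache=False):
        # Re-adding the same uid just overwrites, so duplicates never pile up.
        self._table[uid] = value
        if not no_cache:
            self.cache_table()
    def add_many(self, items):
        added = False
        for uid, value in items:
            self.add(uid, value, no_cache=True)  # defer the write
            added = True
        if added:
            self.cache_table()                   # single write for the batch
idx = TinyIndex()
idx.add_many((i, i * i) for i in range(1000))
assert idx.cache_writes == 1        # one write for the whole batch
idx.add("x", 42)
assert idx.cache_writes == 2        # a per-element add writes immediately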
add_many_descriptors
|
Add multiple descriptors at one time.
:param descriptors: Iterable of descriptor instances to add to this
index.
:type descriptors:
collections.Iterable[smqtk.representation.DescriptorElement]
|
import six
from smqtk.representation import DescriptorIndex, get_data_element_impls
from smqtk.utils import merge_dict, plugin, SimpleTimer
try:
from six.moves import cPickle as pickle
except ImportError:
import pickle
class MemoryDescriptorIndex (DescriptorIndex):
"""
In-memory descriptor index with file caching.
Stored descriptor elements are all held in memory in a uuid-to-element
dictionary (hash table).
If the path to a file cache is provided, it is loaded at construction if it
exists. When elements are added to the index, the in-memory table is dumped
to the cache.
"""
@classmethod
def is_usable(cls):
"""
Check whether this class is available for use.
:return: Boolean determination of whether this implementation is usable.
:rtype: bool
"""
# no dependencies
return True
@classmethod
def get_default_config(cls):
"""
Generate and return a default configuration dictionary for this class.
This will be primarily used for generating what the configuration
dictionary would look like for this class without instantiating it.
By default, we observe what this class's constructor takes as arguments,
turning those argument names into configuration dictionary keys. If any
of those arguments have defaults, we will add those values into the
configuration dictionary appropriately. The dictionary returned should
only contain JSON compliant value types.
It is not guaranteed that the configuration dictionary returned
from this method is valid for construction of an instance of this class.
:return: Default configuration dictionary for the class.
:rtype: dict
"""
c = super(MemoryDescriptorIndex, cls).get_default_config()
c['cache_element'] = plugin.make_config(get_data_element_impls())
return c
@classmethod
def from_config(cls, config_dict, merge_default=True):
"""
Instantiate a new instance of this class given the configuration
JSON-compliant dictionary encapsulating initialization arguments.
:param config_dict: JSON compliant dictionary encapsulating
a configuration.
:type config_dict: dict
:param merge_default: Merge the given configuration on top of the
default provided by ``get_default_config``.
:type merge_default: bool
:return: Constructed instance from the provided config.
:rtype: MemoryDescriptorIndex
"""
if merge_default:
config_dict = merge_dict(cls.get_default_config(), config_dict)
# Optionally construct cache element from sub-config.
if config_dict['cache_element'] \
and config_dict['cache_element']['type']:
e = plugin.from_plugin_config(config_dict['cache_element'],
get_data_element_impls())
config_dict['cache_element'] = e
else:
config_dict['cache_element'] = None
return super(MemoryDescriptorIndex, cls).from_config(config_dict, False)
def __init__(self, cache_element=None, pickle_protocol=-1):
"""
Initialize a new in-memory descriptor index, or reload one from a
cache.
:param cache_element: Optional data element cache, loading an existing
index if the element has bytes. If the given element is writable,
new descriptors added to this index are cached to the element.
:type cache_element: None | smqtk.representation.DataElement
:param pickle_protocol: Pickling protocol to use when serializing index
table to the optionally provided, writable cache element. We will
use -1 by default (latest version, probably a binary form).
:type pickle_protocol: int
"""
super(MemoryDescriptorIndex, self).__init__()
# Mapping of descriptor UUID to the DescriptorElement instance.
#: :type: dict[collections.Hashable, smqtk.representation.DescriptorElement]
self._table = {}
# Record of optional file cache we're using
self.cache_element = cache_element
self.pickle_protocol = pickle_protocol
if cache_element and not cache_element.is_empty():
self._log.debug("Loading cached descriptor index table from %s "
"element.", cache_element.__class__.__name__)
self._table = pickle.loads(cache_element.get_bytes())
def get_config(self):
c = merge_dict(self.get_default_config(), {
"pickle_protocol": self.pickle_protocol,
})
if self.cache_element:
merge_dict(c['cache_element'],
plugin.to_plugin_config(self.cache_element))
return c
def cache_table(self):
if self.cache_element and self.cache_element.writable():
with SimpleTimer("Caching descriptor table", self._log.debug):
self.cache_element.set_bytes(pickle.dumps(self._table,
self.pickle_protocol))
def count(self):
return len(self._table)
def clear(self):
"""
Clear this descriptor index's entries.
"""
self._table = {}
self.cache_table()
def has_descriptor(self, uuid):
"""
Check if a DescriptorElement with the given UUID exists in this index.
:param uuid: UUID to query for
:type uuid: collections.Hashable
:return: True if a DescriptorElement with the given UUID exists in this
index, or False if not.
:rtype: bool
"""
return uuid in self._table
def add_descriptor(self, descriptor, no_cache=False):
"""
Add a descriptor to this index.
Adding the same descriptor multiple times should not add multiple
copies of the descriptor in the index.
:param descriptor: Descriptor to index.
:type descriptor: smqtk.representation.DescriptorElement
:param no_cache: Do not cache the internal table if a file cache was
provided. This would be used if adding many descriptors at a time,
preventing a file write for every individual descriptor added.
:type no_cache: bool
"""
self._table[descriptor.uuid()] = descriptor
if not no_cache:
self.cache_table()
# MASKED: add_many_descriptors function (lines 181-197)
def get_descriptor(self, uuid):
"""
Get the descriptor in this index that is associated with the given UUID.
:param uuid: UUID of the DescriptorElement to get.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:return: DescriptorElement associated with the queried UUID.
:rtype: smqtk.representation.DescriptorElement
"""
return self._table[uuid]
def get_many_descriptors(self, uuids):
"""
Get an iterator over descriptors associated to given descriptor UUIDs.
:param uuids: Iterable of descriptor UUIDs to query for.
:type uuids: collections.Iterable[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
:return: Iterator of descriptors associated to given uuid values.
:rtype: __generator[smqtk.representation.DescriptorElement]
"""
for uid in uuids:
yield self._table[uid]
def remove_descriptor(self, uuid, no_cache=False):
"""
Remove a descriptor from this index by the given UUID.
:param uuid: UUID of the DescriptorElement to remove.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:param no_cache: Do not cache the internal table if a file cache was
provided. This would be used if adding many descriptors at a time,
preventing a file write for every individual descriptor added.
:type no_cache: bool
"""
del self._table[uuid]
if not no_cache:
self.cache_table()
def remove_many_descriptors(self, uuids):
"""
Remove descriptors associated to given descriptor UUIDs from this
index.
:param uuids: Iterable of descriptor UUIDs to remove.
:type uuids: collections.Iterable[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
"""
for uid in uuids:
# using no-cache so we don't trigger multiple file writes
self.remove_descriptor(uid, no_cache=True)
self.cache_table()
def iterkeys(self):
return six.iterkeys(self._table)
def iterdescriptors(self):
return six.itervalues(self._table)
def iteritems(self):
return six.iteritems(self._table)
DESCRIPTOR_INDEX_CLASS = MemoryDescriptorIndex
|
def add_many_descriptors(self, descriptors):
"""
Add multiple descriptors at one time.
:param descriptors: Iterable of descriptor instances to add to this
index.
:type descriptors:
collections.Iterable[smqtk.representation.DescriptorElement]
"""
added_something = False
for d in descriptors:
# using no-cache so we don't trigger multiple file writes
self.add_descriptor(d, no_cache=True)
added_something = True
if added_something:
self.cache_table()
| 181 | 197 |
import six
from smqtk.representation import DescriptorIndex, get_data_element_impls
from smqtk.utils import merge_dict, plugin, SimpleTimer
try:
from six.moves import cPickle as pickle
except ImportError:
import pickle
class MemoryDescriptorIndex (DescriptorIndex):
"""
In-memory descriptor index with file caching.
Stored descriptor elements are all held in memory in a uuid-to-element
dictionary (hash table).
If the path to a file cache is provided, it is loaded at construction if it
exists. When elements are added to the index, the in-memory table is dumped
to the cache.
"""
@classmethod
def is_usable(cls):
"""
Check whether this class is available for use.
:return: Boolean determination of whether this implementation is usable.
:rtype: bool
"""
# no dependencies
return True
@classmethod
def get_default_config(cls):
"""
Generate and return a default configuration dictionary for this class.
This will be primarily used for generating what the configuration
dictionary would look like for this class without instantiating it.
By default, we observe what this class's constructor takes as arguments,
turning those argument names into configuration dictionary keys. If any
of those arguments have defaults, we will add those values into the
configuration dictionary appropriately. The dictionary returned should
only contain JSON compliant value types.
It is not guaranteed that the configuration dictionary returned
from this method is valid for construction of an instance of this class.
:return: Default configuration dictionary for the class.
:rtype: dict
"""
c = super(MemoryDescriptorIndex, cls).get_default_config()
c['cache_element'] = plugin.make_config(get_data_element_impls())
return c
@classmethod
def from_config(cls, config_dict, merge_default=True):
"""
Instantiate a new instance of this class given the configuration
JSON-compliant dictionary encapsulating initialization arguments.
:param config_dict: JSON compliant dictionary encapsulating
a configuration.
:type config_dict: dict
:param merge_default: Merge the given configuration on top of the
default provided by ``get_default_config``.
:type merge_default: bool
:return: Constructed instance from the provided config.
:rtype: MemoryDescriptorIndex
"""
if merge_default:
config_dict = merge_dict(cls.get_default_config(), config_dict)
# Optionally construct cache element from sub-config.
if config_dict['cache_element'] \
and config_dict['cache_element']['type']:
e = plugin.from_plugin_config(config_dict['cache_element'],
get_data_element_impls())
config_dict['cache_element'] = e
else:
config_dict['cache_element'] = None
return super(MemoryDescriptorIndex, cls).from_config(config_dict, False)
def __init__(self, cache_element=None, pickle_protocol=-1):
"""
Initialize a new in-memory descriptor index, or reload one from a
cache.
:param cache_element: Optional data element cache, loading an existing
index if the element has bytes. If the given element is writable,
new descriptors added to this index are cached to the element.
:type cache_element: None | smqtk.representation.DataElement
:param pickle_protocol: Pickling protocol to use when serializing index
table to the optionally provided, writable cache element. We will
use -1 by default (latest version, probably a binary form).
:type pickle_protocol: int
"""
super(MemoryDescriptorIndex, self).__init__()
# Mapping of descriptor UUID to the DescriptorElement instance.
#: :type: dict[collections.Hashable, smqtk.representation.DescriptorElement]
self._table = {}
# Record of optional file cache we're using
self.cache_element = cache_element
self.pickle_protocol = pickle_protocol
if cache_element and not cache_element.is_empty():
self._log.debug("Loading cached descriptor index table from %s "
"element.", cache_element.__class__.__name__)
self._table = pickle.loads(cache_element.get_bytes())
def get_config(self):
c = merge_dict(self.get_default_config(), {
"pickle_protocol": self.pickle_protocol,
})
if self.cache_element:
merge_dict(c['cache_element'],
plugin.to_plugin_config(self.cache_element))
return c
def cache_table(self):
if self.cache_element and self.cache_element.writable():
with SimpleTimer("Caching descriptor table", self._log.debug):
self.cache_element.set_bytes(pickle.dumps(self._table,
self.pickle_protocol))
def count(self):
return len(self._table)
def clear(self):
"""
Clear this descriptor index's entries.
"""
self._table = {}
self.cache_table()
def has_descriptor(self, uuid):
"""
Check if a DescriptorElement with the given UUID exists in this index.
:param uuid: UUID to query for
:type uuid: collections.Hashable
:return: True if a DescriptorElement with the given UUID exists in this
index, or False if not.
:rtype: bool
"""
return uuid in self._table
def add_descriptor(self, descriptor, no_cache=False):
"""
Add a descriptor to this index.
Adding the same descriptor multiple times should not add multiple
copies of the descriptor in the index.
:param descriptor: Descriptor to index.
:type descriptor: smqtk.representation.DescriptorElement
:param no_cache: Do not cache the internal table if a file cache was
provided. This would be used if adding many descriptors at a time,
preventing a file write for every individual descriptor added.
:type no_cache: bool
"""
self._table[descriptor.uuid()] = descriptor
if not no_cache:
self.cache_table()
def add_many_descriptors(self, descriptors):
"""
Add multiple descriptors at one time.
:param descriptors: Iterable of descriptor instances to add to this
index.
:type descriptors:
collections.Iterable[smqtk.representation.DescriptorElement]
"""
added_something = False
for d in descriptors:
# using no-cache so we don't trigger multiple file writes
self.add_descriptor(d, no_cache=True)
added_something = True
if added_something:
self.cache_table()
def get_descriptor(self, uuid):
"""
Get the descriptor in this index that is associated with the given UUID.
:param uuid: UUID of the DescriptorElement to get.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:return: DescriptorElement associated with the queried UUID.
:rtype: smqtk.representation.DescriptorElement
"""
return self._table[uuid]
def get_many_descriptors(self, uuids):
"""
Get an iterator over descriptors associated to given descriptor UUIDs.
:param uuids: Iterable of descriptor UUIDs to query for.
:type uuids: collections.Iterable[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
:return: Iterator of descriptors associated to given uuid values.
:rtype: __generator[smqtk.representation.DescriptorElement]
"""
for uid in uuids:
yield self._table[uid]
def remove_descriptor(self, uuid, no_cache=False):
"""
Remove a descriptor from this index by the given UUID.
:param uuid: UUID of the DescriptorElement to remove.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:param no_cache: Do not cache the internal table if a file cache was
provided. This would be used if adding many descriptors at a time,
preventing a file write for every individual descriptor added.
:type no_cache: bool
"""
del self._table[uuid]
if not no_cache:
self.cache_table()
def remove_many_descriptors(self, uuids):
"""
Remove descriptors associated to given descriptor UUIDs from this
index.
:param uuids: Iterable of descriptor UUIDs to remove.
:type uuids: collections.Iterable[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
"""
for uid in uuids:
# using no-cache so we don't trigger multiple file writes
self.remove_descriptor(uid, no_cache=True)
self.cache_table()
def iterkeys(self):
return six.iterkeys(self._table)
def iterdescriptors(self):
return six.itervalues(self._table)
def iteritems(self):
return six.iteritems(self._table)
DESCRIPTOR_INDEX_CLASS = MemoryDescriptorIndex
|
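Caching in this index is nothing more than a pickle round trip of the uuid-to-element dictionary through the cache element's bytes: ``cache_table`` serializes ``self._table`` and the constructor deserializes it when the cache element is non-empty. The snippet below reproduces that round trip against a plain bytes buffer (a simplification; no smqtk ``DataElement`` is involved):
import pickle
table = {"a": [0.1, 0.2, 0.3], "b": [0.4, 0.5, 0.6]}   # uuid -> descriptor stand-in
# What cache_table() does: serialize the table with the configured protocol.
cache_bytes = pickle.dumps(table, protocol=-1)          # -1 == latest protocol
# What __init__ does when the cache element is non-empty: restore the table.
restored = pickle.loads(cache_bytes)
assert restored == table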
__init__
|
Set up jailer fields.
This plays the role of a default constructor as it populates
the jailer's fields with some default values. Each field can be
further adjusted by each test even with None values.
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Define a class for creating the jailed context."""
import os
import shutil
from subprocess import run, PIPE
from retry.api import retry_call
from framework.defs import API_USOCKET_NAME, FC_BINARY_NAME, \
JAILER_DEFAULT_CHROOT
class JailerContext:
"""Represents jailer configuration and contains jailer helper functions.
Each microvm will have a jailer configuration associated with it.
"""
# Keep in sync with parameters from code base.
jailer_id = None
exec_file = None
numa_node = None
uid = None
gid = None
chroot_base = None
netns = None
daemonize = None
seccomp_level = None
# MASKED: __init__ function (lines 33-59)
def __del__(self):
"""Cleanup this jailer context."""
self.cleanup()
def construct_param_list(self):
"""Create the list of parameters we want the jailer to start with.
We want to be able to vary any parameter even the required ones as we
might want to add integration tests that validate the enforcement of
mandatory arguments.
"""
jailer_param_list = []
# Pretty please, try to keep the same order as in the code base.
if self.jailer_id is not None:
jailer_param_list.extend(['--id', str(self.jailer_id)])
if self.exec_file is not None:
jailer_param_list.extend(['--exec-file', str(self.exec_file)])
if self.numa_node is not None:
jailer_param_list.extend(['--node', str(self.numa_node)])
if self.uid is not None:
jailer_param_list.extend(['--uid', str(self.uid)])
if self.gid is not None:
jailer_param_list.extend(['--gid', str(self.gid)])
if self.chroot_base is not None:
jailer_param_list.extend(
['--chroot-base-dir', str(self.chroot_base)]
)
if self.netns is not None:
jailer_param_list.extend(['--netns', str(self.netns_file_path())])
if self.daemonize:
jailer_param_list.append('--daemonize')
if self.seccomp_level is not None:
jailer_param_list.extend(
['--seccomp-level', str(self.seccomp_level)]
)
return jailer_param_list
def chroot_base_with_id(self):
"""Return the MicroVM chroot base + MicroVM ID."""
return os.path.join(
self.chroot_base if self.chroot_base is not None
else JAILER_DEFAULT_CHROOT,
FC_BINARY_NAME,
self.jailer_id
)
def api_socket_path(self):
"""Return the MicroVM API socket path."""
return os.path.join(self.chroot_path(), API_USOCKET_NAME)
def chroot_path(self):
"""Return the MicroVM chroot path."""
return os.path.join(self.chroot_base_with_id(), 'root')
def jailed_path(self, file_path, create=False):
"""Create a hard link owned by uid:gid.
        Create a hard link to the specified file, change its owner to
        uid:gid, and return a path to the link that is valid within the jail.
"""
file_name = os.path.basename(file_path)
global_p = os.path.join(self.chroot_path(), file_name)
jailed_p = os.path.join("/", file_name)
if create:
cmd = 'ln -f {} {}'.format(file_path, global_p)
run(cmd, shell=True, check=True)
cmd = 'chown {}:{} {}'.format(self.uid, self.gid, global_p)
run(cmd, shell=True, check=True)
return jailed_p
def netns_file_path(self):
"""Get the host netns file path for a jailer context.
Returns the path on the host to the file which represents the netns,
and which must be passed to the jailer as the value of the --netns
parameter, when in use.
"""
if self.netns:
return '/var/run/netns/{}'.format(self.netns)
return None
def netns_cmd_prefix(self):
"""Return the jailer context netns file prefix."""
if self.netns:
return 'ip netns exec {} '.format(self.netns)
return ''
def setup(self):
"""Set up this jailer context."""
os.makedirs(
self.chroot_base if self.chroot_base is not None
else JAILER_DEFAULT_CHROOT,
exist_ok=True
)
if self.netns:
run('ip netns add {}'.format(self.netns), shell=True, check=True)
def cleanup(self):
"""Clean up this jailer context."""
shutil.rmtree(self.chroot_base_with_id(), ignore_errors=True)
if self.netns:
_ = run(
'ip netns del {}'.format(self.netns),
shell=True,
stderr=PIPE
)
# Remove the cgroup folders associated with this microvm.
# The base /sys/fs/cgroup/<controller>/firecracker folder will remain,
# because we can't remove it unless we're sure there's no other running
# microVM.
# Firecracker is interested in these 3 cgroups for the moment.
controllers = ('cpu', 'cpuset', 'pids')
for controller in controllers:
# Obtain the tasks from each cgroup and wait on them before
# removing the microvm's associated cgroup folder.
try:
retry_call(
f=self._kill_crgoup_tasks,
fargs=[controller],
exceptions=TimeoutError,
max_delay=5
)
except TimeoutError:
pass
            # As the files inside a cgroup aren't real, they can't and don't
            # need to be removed; that is why 'rm -rf' and 'rmdir' fail.
# We only need to remove the cgroup directories. The "-depth"
# argument tells find to do a depth first recursion, so that
# we remove any sub cgroups first if they are there.
back_cmd = r'-depth -type d -exec rmdir {} \;'
cmd = 'find /sys/fs/cgroup/{}/{}/{} {}'.format(
controller,
FC_BINARY_NAME,
self.jailer_id,
back_cmd
)
            # We do not need to know if it succeeded or not; after all, we are
            # trying to clean up resources created by the jailer itself, not by
            # the testing system.
_ = run(cmd, shell=True, stderr=PIPE)
def _kill_crgoup_tasks(self, controller):
"""Simulate wait on pid.
Read the tasks file and stay there until /proc/{pid}
disappears. The retry function that calls this code makes
sure we do not timeout.
"""
tasks_file = '/sys/fs/cgroup/{}/{}/{}/tasks'.format(
controller,
FC_BINARY_NAME,
self.jailer_id
)
# If tests do not call start on machines, the cgroups will not be
# created.
if not os.path.exists(tasks_file):
return True
cmd = 'cat {}'.format(tasks_file)
tasks = run(cmd, shell=True, stdout=PIPE).stdout.decode('utf-8')
tasks_split = tasks.splitlines()
for task in tasks_split:
if os.path.exists("/proc/{}".format(task)):
raise TimeoutError
return True
|
def __init__(
self,
jailer_id,
exec_file,
numa_node=0,
uid=1234,
gid=1234,
chroot_base=JAILER_DEFAULT_CHROOT,
netns=None,
daemonize=True,
seccomp_level=2
):
"""Set up jailer fields.
This plays the role of a default constructor as it populates
the jailer's fields with some default values. Each field can be
further adjusted by each test even with None values.
"""
self.jailer_id = jailer_id
self.exec_file = exec_file
self.numa_node = numa_node
self.uid = uid
self.gid = gid
self.chroot_base = chroot_base
self.netns = netns if netns is not None else jailer_id
self.daemonize = daemonize
self.seccomp_level = seccomp_level
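A brief, hypothetical construction sketch for the __init__ above; the id and binary path are placeholders, and it only illustrates that `netns` falls back to `jailer_id` and that the defaults surface in construct_param_list():
context = JailerContext(
    jailer_id='test-microvm-01',          # placeholder id
    exec_file='/usr/bin/firecracker',     # placeholder binary path
)
assert context.netns == 'test-microvm-01'     # netns defaults to jailer_id
assert context.daemonize is True
print(context.construct_param_list())
# e.g. ['--id', 'test-microvm-01', '--exec-file', '/usr/bin/firecracker',
#       '--node', '0', '--uid', '1234', '--gid', '1234',
#       '--chroot-base-dir', <JAILER_DEFAULT_CHROOT>, '--netns',
#       '/var/run/netns/test-microvm-01', '--daemonize',
#       '--seccomp-level', '2']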
| 33 | 59 |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Define a class for creating the jailed context."""
import os
import shutil
from subprocess import run, PIPE
from retry.api import retry_call
from framework.defs import API_USOCKET_NAME, FC_BINARY_NAME, \
JAILER_DEFAULT_CHROOT
class JailerContext:
"""Represents jailer configuration and contains jailer helper functions.
Each microvm will have a jailer configuration associated with it.
"""
# Keep in sync with parameters from code base.
jailer_id = None
exec_file = None
numa_node = None
uid = None
gid = None
chroot_base = None
netns = None
daemonize = None
seccomp_level = None
def __init__(
self,
jailer_id,
exec_file,
numa_node=0,
uid=1234,
gid=1234,
chroot_base=JAILER_DEFAULT_CHROOT,
netns=None,
daemonize=True,
seccomp_level=2
):
"""Set up jailer fields.
This plays the role of a default constructor as it populates
the jailer's fields with some default values. Each field can be
further adjusted by each test even with None values.
"""
self.jailer_id = jailer_id
self.exec_file = exec_file
self.numa_node = numa_node
self.uid = uid
self.gid = gid
self.chroot_base = chroot_base
self.netns = netns if netns is not None else jailer_id
self.daemonize = daemonize
self.seccomp_level = seccomp_level
def __del__(self):
"""Cleanup this jailer context."""
self.cleanup()
def construct_param_list(self):
"""Create the list of parameters we want the jailer to start with.
We want to be able to vary any parameter even the required ones as we
might want to add integration tests that validate the enforcement of
mandatory arguments.
"""
jailer_param_list = []
# Pretty please, try to keep the same order as in the code base.
if self.jailer_id is not None:
jailer_param_list.extend(['--id', str(self.jailer_id)])
if self.exec_file is not None:
jailer_param_list.extend(['--exec-file', str(self.exec_file)])
if self.numa_node is not None:
jailer_param_list.extend(['--node', str(self.numa_node)])
if self.uid is not None:
jailer_param_list.extend(['--uid', str(self.uid)])
if self.gid is not None:
jailer_param_list.extend(['--gid', str(self.gid)])
if self.chroot_base is not None:
jailer_param_list.extend(
['--chroot-base-dir', str(self.chroot_base)]
)
if self.netns is not None:
jailer_param_list.extend(['--netns', str(self.netns_file_path())])
if self.daemonize:
jailer_param_list.append('--daemonize')
if self.seccomp_level is not None:
jailer_param_list.extend(
['--seccomp-level', str(self.seccomp_level)]
)
return jailer_param_list
def chroot_base_with_id(self):
"""Return the MicroVM chroot base + MicroVM ID."""
return os.path.join(
self.chroot_base if self.chroot_base is not None
else JAILER_DEFAULT_CHROOT,
FC_BINARY_NAME,
self.jailer_id
)
def api_socket_path(self):
"""Return the MicroVM API socket path."""
return os.path.join(self.chroot_path(), API_USOCKET_NAME)
def chroot_path(self):
"""Return the MicroVM chroot path."""
return os.path.join(self.chroot_base_with_id(), 'root')
def jailed_path(self, file_path, create=False):
"""Create a hard link owned by uid:gid.
        Create a hard link to the specified file, change its owner to
        uid:gid, and return a path to the link that is valid within the jail.
"""
file_name = os.path.basename(file_path)
global_p = os.path.join(self.chroot_path(), file_name)
jailed_p = os.path.join("/", file_name)
if create:
cmd = 'ln -f {} {}'.format(file_path, global_p)
run(cmd, shell=True, check=True)
cmd = 'chown {}:{} {}'.format(self.uid, self.gid, global_p)
run(cmd, shell=True, check=True)
return jailed_p
def netns_file_path(self):
"""Get the host netns file path for a jailer context.
Returns the path on the host to the file which represents the netns,
and which must be passed to the jailer as the value of the --netns
parameter, when in use.
"""
if self.netns:
return '/var/run/netns/{}'.format(self.netns)
return None
def netns_cmd_prefix(self):
"""Return the jailer context netns file prefix."""
if self.netns:
return 'ip netns exec {} '.format(self.netns)
return ''
def setup(self):
"""Set up this jailer context."""
os.makedirs(
self.chroot_base if self.chroot_base is not None
else JAILER_DEFAULT_CHROOT,
exist_ok=True
)
if self.netns:
run('ip netns add {}'.format(self.netns), shell=True, check=True)
def cleanup(self):
"""Clean up this jailer context."""
shutil.rmtree(self.chroot_base_with_id(), ignore_errors=True)
if self.netns:
_ = run(
'ip netns del {}'.format(self.netns),
shell=True,
stderr=PIPE
)
# Remove the cgroup folders associated with this microvm.
# The base /sys/fs/cgroup/<controller>/firecracker folder will remain,
# because we can't remove it unless we're sure there's no other running
# microVM.
# Firecracker is interested in these 3 cgroups for the moment.
controllers = ('cpu', 'cpuset', 'pids')
for controller in controllers:
# Obtain the tasks from each cgroup and wait on them before
# removing the microvm's associated cgroup folder.
try:
retry_call(
f=self._kill_crgoup_tasks,
fargs=[controller],
exceptions=TimeoutError,
max_delay=5
)
except TimeoutError:
pass
            # As the files inside a cgroup aren't real, they can't and don't
            # need to be removed; that is why 'rm -rf' and 'rmdir' fail.
# We only need to remove the cgroup directories. The "-depth"
# argument tells find to do a depth first recursion, so that
# we remove any sub cgroups first if they are there.
back_cmd = r'-depth -type d -exec rmdir {} \;'
cmd = 'find /sys/fs/cgroup/{}/{}/{} {}'.format(
controller,
FC_BINARY_NAME,
self.jailer_id,
back_cmd
)
            # We do not need to know if it succeeded or not; after all, we are
            # trying to clean up resources created by the jailer itself, not by
            # the testing system.
_ = run(cmd, shell=True, stderr=PIPE)
def _kill_crgoup_tasks(self, controller):
"""Simulate wait on pid.
Read the tasks file and stay there until /proc/{pid}
disappears. The retry function that calls this code makes
sure we do not timeout.
"""
tasks_file = '/sys/fs/cgroup/{}/{}/{}/tasks'.format(
controller,
FC_BINARY_NAME,
self.jailer_id
)
# If tests do not call start on machines, the cgroups will not be
# created.
if not os.path.exists(tasks_file):
return True
cmd = 'cat {}'.format(tasks_file)
tasks = run(cmd, shell=True, stdout=PIPE).stdout.decode('utf-8')
tasks_split = tasks.splitlines()
for task in tasks_split:
if os.path.exists("/proc/{}".format(task)):
raise TimeoutError
return True
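As a rough sketch of how the path helpers in the file above compose; the concrete prefix depends on JAILER_DEFAULT_CHROOT and FC_BINARY_NAME from framework.defs, and '/srv/jailer' plus 'firecracker' are assumptions made here for illustration only:
ctx = JailerContext(jailer_id='abc123', exec_file='/usr/bin/firecracker')
# Assuming JAILER_DEFAULT_CHROOT == '/srv/jailer' and FC_BINARY_NAME == 'firecracker':
#   ctx.chroot_base_with_id() -> '/srv/jailer/firecracker/abc123'
#   ctx.chroot_path()         -> '/srv/jailer/firecracker/abc123/root'
#   ctx.api_socket_path()     -> chroot_path() joined with API_USOCKET_NAME
print(ctx.chroot_path())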
|
_kill_crgoup_tasks
|
Simulate wait on pid.
Read the tasks file and stay there until /proc/{pid}
disappears. The retry function that calls this code makes
sure we do not timeout.
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Define a class for creating the jailed context."""
import os
import shutil
from subprocess import run, PIPE
from retry.api import retry_call
from framework.defs import API_USOCKET_NAME, FC_BINARY_NAME, \
JAILER_DEFAULT_CHROOT
class JailerContext:
"""Represents jailer configuration and contains jailer helper functions.
Each microvm will have a jailer configuration associated with it.
"""
# Keep in sync with parameters from code base.
jailer_id = None
exec_file = None
numa_node = None
uid = None
gid = None
chroot_base = None
netns = None
daemonize = None
seccomp_level = None
def __init__(
self,
jailer_id,
exec_file,
numa_node=0,
uid=1234,
gid=1234,
chroot_base=JAILER_DEFAULT_CHROOT,
netns=None,
daemonize=True,
seccomp_level=2
):
"""Set up jailer fields.
This plays the role of a default constructor as it populates
the jailer's fields with some default values. Each field can be
further adjusted by each test even with None values.
"""
self.jailer_id = jailer_id
self.exec_file = exec_file
self.numa_node = numa_node
self.uid = uid
self.gid = gid
self.chroot_base = chroot_base
self.netns = netns if netns is not None else jailer_id
self.daemonize = daemonize
self.seccomp_level = seccomp_level
def __del__(self):
"""Cleanup this jailer context."""
self.cleanup()
def construct_param_list(self):
"""Create the list of parameters we want the jailer to start with.
We want to be able to vary any parameter even the required ones as we
might want to add integration tests that validate the enforcement of
mandatory arguments.
"""
jailer_param_list = []
# Pretty please, try to keep the same order as in the code base.
if self.jailer_id is not None:
jailer_param_list.extend(['--id', str(self.jailer_id)])
if self.exec_file is not None:
jailer_param_list.extend(['--exec-file', str(self.exec_file)])
if self.numa_node is not None:
jailer_param_list.extend(['--node', str(self.numa_node)])
if self.uid is not None:
jailer_param_list.extend(['--uid', str(self.uid)])
if self.gid is not None:
jailer_param_list.extend(['--gid', str(self.gid)])
if self.chroot_base is not None:
jailer_param_list.extend(
['--chroot-base-dir', str(self.chroot_base)]
)
if self.netns is not None:
jailer_param_list.extend(['--netns', str(self.netns_file_path())])
if self.daemonize:
jailer_param_list.append('--daemonize')
if self.seccomp_level is not None:
jailer_param_list.extend(
['--seccomp-level', str(self.seccomp_level)]
)
return jailer_param_list
def chroot_base_with_id(self):
"""Return the MicroVM chroot base + MicroVM ID."""
return os.path.join(
self.chroot_base if self.chroot_base is not None
else JAILER_DEFAULT_CHROOT,
FC_BINARY_NAME,
self.jailer_id
)
def api_socket_path(self):
"""Return the MicroVM API socket path."""
return os.path.join(self.chroot_path(), API_USOCKET_NAME)
def chroot_path(self):
"""Return the MicroVM chroot path."""
return os.path.join(self.chroot_base_with_id(), 'root')
def jailed_path(self, file_path, create=False):
"""Create a hard link owned by uid:gid.
        Create a hard link to the specified file, change its owner to
        uid:gid, and return a path to the link that is valid within the jail.
"""
file_name = os.path.basename(file_path)
global_p = os.path.join(self.chroot_path(), file_name)
jailed_p = os.path.join("/", file_name)
if create:
cmd = 'ln -f {} {}'.format(file_path, global_p)
run(cmd, shell=True, check=True)
cmd = 'chown {}:{} {}'.format(self.uid, self.gid, global_p)
run(cmd, shell=True, check=True)
return jailed_p
def netns_file_path(self):
"""Get the host netns file path for a jailer context.
Returns the path on the host to the file which represents the netns,
and which must be passed to the jailer as the value of the --netns
parameter, when in use.
"""
if self.netns:
return '/var/run/netns/{}'.format(self.netns)
return None
def netns_cmd_prefix(self):
"""Return the jailer context netns file prefix."""
if self.netns:
return 'ip netns exec {} '.format(self.netns)
return ''
def setup(self):
"""Set up this jailer context."""
os.makedirs(
self.chroot_base if self.chroot_base is not None
else JAILER_DEFAULT_CHROOT,
exist_ok=True
)
if self.netns:
run('ip netns add {}'.format(self.netns), shell=True, check=True)
def cleanup(self):
"""Clean up this jailer context."""
shutil.rmtree(self.chroot_base_with_id(), ignore_errors=True)
if self.netns:
_ = run(
'ip netns del {}'.format(self.netns),
shell=True,
stderr=PIPE
)
# Remove the cgroup folders associated with this microvm.
# The base /sys/fs/cgroup/<controller>/firecracker folder will remain,
# because we can't remove it unless we're sure there's no other running
# microVM.
# Firecracker is interested in these 3 cgroups for the moment.
controllers = ('cpu', 'cpuset', 'pids')
for controller in controllers:
# Obtain the tasks from each cgroup and wait on them before
# removing the microvm's associated cgroup folder.
try:
retry_call(
f=self._kill_crgoup_tasks,
fargs=[controller],
exceptions=TimeoutError,
max_delay=5
)
except TimeoutError:
pass
            # As the files inside a cgroup aren't real, they can't and don't
            # need to be removed; that is why 'rm -rf' and 'rmdir' fail.
# We only need to remove the cgroup directories. The "-depth"
# argument tells find to do a depth first recursion, so that
# we remove any sub cgroups first if they are there.
back_cmd = r'-depth -type d -exec rmdir {} \;'
cmd = 'find /sys/fs/cgroup/{}/{}/{} {}'.format(
controller,
FC_BINARY_NAME,
self.jailer_id,
back_cmd
)
            # We do not need to know if it succeeded or not; after all, we are
            # trying to clean up resources created by the jailer itself, not by
            # the testing system.
_ = run(cmd, shell=True, stderr=PIPE)
# MASKED: _kill_crgoup_tasks function (lines 208-233)
|
def _kill_crgoup_tasks(self, controller):
"""Simulate wait on pid.
Read the tasks file and stay there until /proc/{pid}
disappears. The retry function that calls this code makes
sure we do not timeout.
"""
tasks_file = '/sys/fs/cgroup/{}/{}/{}/tasks'.format(
controller,
FC_BINARY_NAME,
self.jailer_id
)
# If tests do not call start on machines, the cgroups will not be
# created.
if not os.path.exists(tasks_file):
return True
cmd = 'cat {}'.format(tasks_file)
tasks = run(cmd, shell=True, stdout=PIPE).stdout.decode('utf-8')
tasks_split = tasks.splitlines()
for task in tasks_split:
if os.path.exists("/proc/{}".format(task)):
raise TimeoutError
return True
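The function above is meant to be driven by retry_call, exactly as cleanup() does per controller; a stripped-down sketch of that pairing, with `ctx` standing in for a JailerContext whose microvm has already been started:
from retry.api import retry_call

for controller in ('cpu', 'cpuset', 'pids'):
    try:
        # Re-invoke _kill_crgoup_tasks until every task listed in the
        # cgroup's tasks file has disappeared from /proc, or until the
        # retry wrapper gives up after max_delay seconds.
        retry_call(
            f=ctx._kill_crgoup_tasks,
            fargs=[controller],
            exceptions=TimeoutError,
            max_delay=5,
        )
    except TimeoutError:
        pass  # best effort, mirroring cleanup()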
| 208 | 233 |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Define a class for creating the jailed context."""
import os
import shutil
from subprocess import run, PIPE
from retry.api import retry_call
from framework.defs import API_USOCKET_NAME, FC_BINARY_NAME, \
JAILER_DEFAULT_CHROOT
class JailerContext:
"""Represents jailer configuration and contains jailer helper functions.
Each microvm will have a jailer configuration associated with it.
"""
# Keep in sync with parameters from code base.
jailer_id = None
exec_file = None
numa_node = None
uid = None
gid = None
chroot_base = None
netns = None
daemonize = None
seccomp_level = None
def __init__(
self,
jailer_id,
exec_file,
numa_node=0,
uid=1234,
gid=1234,
chroot_base=JAILER_DEFAULT_CHROOT,
netns=None,
daemonize=True,
seccomp_level=2
):
"""Set up jailer fields.
This plays the role of a default constructor as it populates
the jailer's fields with some default values. Each field can be
further adjusted by each test even with None values.
"""
self.jailer_id = jailer_id
self.exec_file = exec_file
self.numa_node = numa_node
self.uid = uid
self.gid = gid
self.chroot_base = chroot_base
self.netns = netns if netns is not None else jailer_id
self.daemonize = daemonize
self.seccomp_level = seccomp_level
def __del__(self):
"""Cleanup this jailer context."""
self.cleanup()
def construct_param_list(self):
"""Create the list of parameters we want the jailer to start with.
We want to be able to vary any parameter even the required ones as we
might want to add integration tests that validate the enforcement of
mandatory arguments.
"""
jailer_param_list = []
# Pretty please, try to keep the same order as in the code base.
if self.jailer_id is not None:
jailer_param_list.extend(['--id', str(self.jailer_id)])
if self.exec_file is not None:
jailer_param_list.extend(['--exec-file', str(self.exec_file)])
if self.numa_node is not None:
jailer_param_list.extend(['--node', str(self.numa_node)])
if self.uid is not None:
jailer_param_list.extend(['--uid', str(self.uid)])
if self.gid is not None:
jailer_param_list.extend(['--gid', str(self.gid)])
if self.chroot_base is not None:
jailer_param_list.extend(
['--chroot-base-dir', str(self.chroot_base)]
)
if self.netns is not None:
jailer_param_list.extend(['--netns', str(self.netns_file_path())])
if self.daemonize:
jailer_param_list.append('--daemonize')
if self.seccomp_level is not None:
jailer_param_list.extend(
['--seccomp-level', str(self.seccomp_level)]
)
return jailer_param_list
def chroot_base_with_id(self):
"""Return the MicroVM chroot base + MicroVM ID."""
return os.path.join(
self.chroot_base if self.chroot_base is not None
else JAILER_DEFAULT_CHROOT,
FC_BINARY_NAME,
self.jailer_id
)
def api_socket_path(self):
"""Return the MicroVM API socket path."""
return os.path.join(self.chroot_path(), API_USOCKET_NAME)
def chroot_path(self):
"""Return the MicroVM chroot path."""
return os.path.join(self.chroot_base_with_id(), 'root')
def jailed_path(self, file_path, create=False):
"""Create a hard link owned by uid:gid.
        Create a hard link to the specified file, change its owner to
        uid:gid, and return a path to the link that is valid within the jail.
"""
file_name = os.path.basename(file_path)
global_p = os.path.join(self.chroot_path(), file_name)
jailed_p = os.path.join("/", file_name)
if create:
cmd = 'ln -f {} {}'.format(file_path, global_p)
run(cmd, shell=True, check=True)
cmd = 'chown {}:{} {}'.format(self.uid, self.gid, global_p)
run(cmd, shell=True, check=True)
return jailed_p
def netns_file_path(self):
"""Get the host netns file path for a jailer context.
Returns the path on the host to the file which represents the netns,
and which must be passed to the jailer as the value of the --netns
parameter, when in use.
"""
if self.netns:
return '/var/run/netns/{}'.format(self.netns)
return None
def netns_cmd_prefix(self):
"""Return the jailer context netns file prefix."""
if self.netns:
return 'ip netns exec {} '.format(self.netns)
return ''
def setup(self):
"""Set up this jailer context."""
os.makedirs(
self.chroot_base if self.chroot_base is not None
else JAILER_DEFAULT_CHROOT,
exist_ok=True
)
if self.netns:
run('ip netns add {}'.format(self.netns), shell=True, check=True)
def cleanup(self):
"""Clean up this jailer context."""
shutil.rmtree(self.chroot_base_with_id(), ignore_errors=True)
if self.netns:
_ = run(
'ip netns del {}'.format(self.netns),
shell=True,
stderr=PIPE
)
# Remove the cgroup folders associated with this microvm.
# The base /sys/fs/cgroup/<controller>/firecracker folder will remain,
# because we can't remove it unless we're sure there's no other running
# microVM.
# Firecracker is interested in these 3 cgroups for the moment.
controllers = ('cpu', 'cpuset', 'pids')
for controller in controllers:
# Obtain the tasks from each cgroup and wait on them before
# removing the microvm's associated cgroup folder.
try:
retry_call(
f=self._kill_crgoup_tasks,
fargs=[controller],
exceptions=TimeoutError,
max_delay=5
)
except TimeoutError:
pass
            # As the files inside a cgroup aren't real, they can't and don't
            # need to be removed; that is why 'rm -rf' and 'rmdir' fail.
# We only need to remove the cgroup directories. The "-depth"
# argument tells find to do a depth first recursion, so that
# we remove any sub cgroups first if they are there.
back_cmd = r'-depth -type d -exec rmdir {} \;'
cmd = 'find /sys/fs/cgroup/{}/{}/{} {}'.format(
controller,
FC_BINARY_NAME,
self.jailer_id,
back_cmd
)
            # We do not need to know if it succeeded or not; after all, we are
            # trying to clean up resources created by the jailer itself, not by
            # the testing system.
_ = run(cmd, shell=True, stderr=PIPE)
def _kill_crgoup_tasks(self, controller):
"""Simulate wait on pid.
Read the tasks file and stay there until /proc/{pid}
disappears. The retry function that calls this code makes
sure we do not timeout.
"""
tasks_file = '/sys/fs/cgroup/{}/{}/{}/tasks'.format(
controller,
FC_BINARY_NAME,
self.jailer_id
)
# If tests do not call start on machines, the cgroups will not be
# created.
if not os.path.exists(tasks_file):
return True
cmd = 'cat {}'.format(tasks_file)
tasks = run(cmd, shell=True, stdout=PIPE).stdout.decode('utf-8')
tasks_split = tasks.splitlines()
for task in tasks_split:
if os.path.exists("/proc/{}".format(task)):
raise TimeoutError
return True
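One more illustrative sketch against the same class, this time for the network-namespace helpers; the namespace name is a placeholder, and the resulting commands only make sense on a host where `ip netns` is available:
ctx = JailerContext(
    jailer_id='vm-42',
    exec_file='/usr/bin/firecracker',
    netns='vm-42-netns',                  # placeholder namespace name
)
# '--netns' is passed the host-side bind file of the namespace:
#   ctx.netns_file_path() -> '/var/run/netns/vm-42-netns'
# and commands that must run inside it are prefixed accordingly:
#   ctx.netns_cmd_prefix() -> 'ip netns exec vm-42-netns '
cmd = ctx.netns_cmd_prefix() + 'ip link show'
print(cmd)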
|
__init__
|
Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
|
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
# MASKED: __init__ function (lines 33-107)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
        Returns
        -------
        Instance of :class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
        Instance of :class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
        :class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() <= len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True will generate prefix like `s__` or `d__`
desired_ranks
List of desired ranks to generate.
If False then will generate all main ranks
drop_ranks
List of ranks to drop from desired ranks.
            This parameter is only useful when `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
        :class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
        :class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
            If True, remove a feature when any one of the `ranks` is missing.
            If False, remove it only when all `ranks` are missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
        :class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
        :class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
            Taxonomy output format (currently only `lineage` is supported).
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
            raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
            The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
        # Get the first lineage sample for notation testing, assuming the rest share the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
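A hedged sketch of combining the query methods above, assuming `tax` is an already-built RepTaxonomy with greengenes-style lineages and that 'g' (genus) is among VALID_RANKS (that constant is defined elsewhere in pmaf); all ids and taxa are illustrative:
# Consensus lineage strings for every feature; with missing_rank=True the
# absent ranks are rendered as empty prefixes such as 'g__' or 's__'.
lineages = tax.get_lineage_by_id(missing_rank=True)

# Collapse features up to the genus rank; the return value (when the
# instance ratifies the action) maps new group ids to the merged
# original feature ids.
merge_map = tax.merge_features_by_rank('g')

# Drop features that carry no taxonomic assignment at all.
tax.drop_features_without_taxa()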
|
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
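A minimal construction sketch for the constructor above, built from a two-feature pandas Series of greengenes-style lineage strings; the feature ids and taxa are invented, and whether the top rank uses 'k__' or 'd__' prefixes depends on VALID_RANKS, which is defined elsewhere in pmaf:
import pandas as pd

lineages = pd.Series(
    {
        'F1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales; '
              'f__Lactobacillaceae; g__Lactobacillus; s__',
        'F2': 'k__Bacteria; p__Proteobacteria; c__; o__; f__; g__; s__',
    }
)
tax = RepTaxonomy(lineages)        # notation is parsed as greengenes by default
print(tax.avail_ranks)             # ranks that actually carry data
print(tax.get_taxonomy_by_id())    # per-rank DataFrame for all features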
| 33 | 107 |
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
        Returns
        -------
        Instance of :class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
        Instance of :class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
        :class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() <= len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True will generate prefix like `s__` or `d__`
desired_ranks
List of desired ranks to generate.
If False then will generate all main ranks
drop_ranks
List of ranks to drop from desired ranks.
            This parameter is only useful when `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
        :class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
        :class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
            If True, remove a feature when any one of the `ranks` is missing.
            If False, remove it only when all `ranks` are missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
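# Sketch of the mapper returned above (hypothetical ids for illustration):
# {0: ["feat_1", "feat_7"], 1: ["feat_2"]} -- keys are the new integer group
# indices assigned to each unique lineage, values are the original feature ids
# that were collapsed into that group.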
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`level` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
Generate taxonomy in format(currently only `lineage` is supported.)
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
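# Behaviour of `taxon_fixer` on assumed inputs (illustration only):
# " [escherichia] " -> "Escherichia", "" -> None, None -> None.
# Bracketed names are unwrapped and taxa are re-capitalised before the
# lineages are rebuilt by __reconstruct_internal_lineages().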
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
# Get first lineage sample for notation testing, assuming the rest share the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
|
__load_biom
|
Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
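Notes
-----
A minimal usage sketch; ``table.biom`` is a hypothetical path and the loader
is normally reached through the constructor or :meth:`from_biom` rather than
called directly:
>>> tax = RepTaxonomy.from_biom("table.biom")  # doctest: +SKIP
The file must carry observation metadata with a taxonomy-like column,
otherwise a :class:`ValueError` is raised.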
|
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
Returns
-------
Instance of :class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
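# Illustrative call (file path and column name are made up, not from the source):
# >>> tax = RepTaxonomy.from_csv("taxonomy.csv", taxonomy_columns="Taxon")
# The same keyword arguments are forwarded first to pandas.read_csv and then to
# the RepTaxonomy constructor, and the source path is recorded in the metadata.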
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
# MASKED: __load_biom function (lines 166-200)
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True, missing ranks are generated as bare prefixes such as `s__` or `d__`.
desired_ranks
List of desired ranks to generate.
If False, all main ranks are generated.
drop_ranks
List of ranks to drop from the desired ranks.
This parameter is only used when `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
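# Example lookups (patterns are illustrative only):
# >>> tax.find_features_by_pattern("bacteroid")                 # plain substring
# >>> tax.find_features_by_pattern(r"g__[A-Z]\w+", regex=True)  # regex mode
# Matching runs against the pre-built "lineage" column, i.e. the full consensus
# lineage string of each feature, honouring the case/regex flags above.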
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
If True removes feature with single occurrence of missing rank.
If False all `ranks` must be missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`level` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
Generate taxonomy in format(currently only `lineage` is supported.)
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
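# Sketch of a typical export call (output path is hypothetical):
# >>> tax.export("taxonomy_out", _add_ext=True)
# With `_add_ext=True` the ".csv" suffix is appended automatically; any extra
# keyword arguments flow through `_export` into `get_lineage_by_id`.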
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
# Get first lineage sample for notation testing, assuming the rest share the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
|
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
| 166 | 200 |
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
Returns
-------
Instance of :class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True, missing ranks are generated as bare prefixes such as `s__` or `d__`.
desired_ranks
List of desired ranks to generate.
If False, all main ranks are generated.
drop_ranks
List of ranks to drop from the desired ranks.
This parameter is only used when `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
If True removes feature with single occurrence of missing rank.
If False all `ranks` must be missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`level` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
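# Illustrative sketch (the rank label is assumed to follow the short codes in
# VALID_RANKS):
# >>> tax.merge_features_by_rank("g")
# Features whose lineages agree on the ranks returned by get_rank_upto() up to
# the requested level are grouped, the internal taxonomy is rebuilt with new
# integer indices, and the returned mapper links each new index to the merged
# feature ids.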
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
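# Usage sketch with made-up feature ids:
# >>> sub = tax.get_subset(rids=["feat_1", "feat_3"])
# The subset is a fresh RepTaxonomy constructed from the stored lineage strings,
# so rank columns are re-parsed rather than copied from the parent instance.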
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
Generate taxonomy in format(currently only `lineage` is supported.)
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
# Get first lineage sample for notation testing, assuming the rest share the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
|
_merge_features_by_map
|
Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
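Notes
-----
A minimal sketch of the expected `map_dict` (instance name and feature ids are
hypothetical):
>>> rep_taxonomy._merge_features_by_map({0: ["feat_1", "feat_4"], 1: ["feat_2"]}, done=True)
The method only ratifies a merge that was already applied to the internal
taxonomy; calling it with ``done=False`` raises :class:`NotImplementedError`.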
|
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
        Returns
        -------
        Instance of
            :class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
        :class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
# MASKED: _merge_features_by_map function (lines 219-241)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
        :class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
        if self.xrid.isin(target_ids).sum() == len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
            If True, will generate prefixes like `s__` or `d__` for missing ranks.
        desired_ranks
            List of desired ranks to generate.
            If False, all main ranks will be generated.
        drop_ranks
            List of ranks to drop from the desired ranks.
            This parameter is only useful if `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
        :class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
        :class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
            If True, a feature is removed when any of the `ranks` is missing.
            If False, all `ranks` must be missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
        :class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
        :class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
            Format in which to generate the taxonomy (currently only `lineage` is supported).
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
            raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
            :const:`pmaf.internal._constants.AVAIL_TAXONOMY_NOTATIONS`
        order_ranks
            List with the target rank order. Default is set to None.
            The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
            Taxonomy lineage notation style. Can be one of :const:`pmaf.internal._constants.AVAIL_TAXONOMY_NOTATIONS`
        order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
            # Get first lineage _sample for notation testing assuming the rest have the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
            Taxonomy lineage notation style. Can be one of :const:`pmaf.internal._constants.AVAIL_TAXONOMY_NOTATIONS`
        order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
                lambda taxa: ";".join([(t if isinstance(t, str) else "") for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
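# --- Editor's illustrative sketch (addition, not part of the original pmaf module) ---
# Typical flow for the merge helpers defined above: merge_features_by_rank() collapses
# features that share a lineage up to the given rank, reinitialises the internal
# taxonomy, and returns an Optional[Mapper] from new group ids to the original feature
# ids. The rank label and variable names below are hypothetical.
#
# mapper = taxonomy.merge_features_by_rank("c")      # e.g. merge at class level
# for new_id, old_ids in (mapper or {}).items():
#     print(new_id, "<-", old_ids)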
|
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
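    # Editor's note (addition): this method only ratifies a merge that the caller has
    # already applied to the internal taxonomy. For example, merge_duplicated_features()
    # first rebuilds the taxonomy from the grouped lineages and then calls
    #     self._merge_features_by_map({0: ["F1", "F4"], 1: ["F2"]}, True)
    # where the keys are new group ids and the values are the original (hypothetical)
    # feature ids; the return value comes from _ratify_action().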
| 219 | 241 |
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
        Returns
        -------
        Instance of
            :class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
        :class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
        :class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
        if self.xrid.isin(target_ids).sum() == len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
            If True, will generate prefixes like `s__` or `d__` for missing ranks.
        desired_ranks
            List of desired ranks to generate.
            If False, all main ranks will be generated.
        drop_ranks
            List of ranks to drop from the desired ranks.
            This parameter is only useful if `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
        :class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
        :class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
            If True, a feature is removed when any of the `ranks` is missing.
            If False, all `ranks` must be missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
        :class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
        :class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
            Format in which to generate the taxonomy (currently only `lineage` is supported).
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
            raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
            :const:`pmaf.internal._constants.AVAIL_TAXONOMY_NOTATIONS`
        order_ranks
            List with the target rank order. Default is set to None.
            The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
            Taxonomy lineage notation style. Can be one of :const:`pmaf.internal._constants.AVAIL_TAXONOMY_NOTATIONS`
        order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
            # Get first lineage _sample for notation testing assuming the rest have the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
            Taxonomy lineage notation style. Can be one of :const:`pmaf.internal._constants.AVAIL_TAXONOMY_NOTATIONS`
        order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
                lambda taxa: ";".join([(t if isinstance(t, str) else "") for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
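# --- Editor's illustrative sketch (addition, not part of the original pmaf module) ---
# export() funnels through _export(), which calls get_lineage_by_id(), so exporting
# consensus lineages is a one-liner. The output path below is hypothetical.
#
# taxonomy.export("taxonomy_lineages.csv", sep=",", missing_rank=True)
# # or work with the pandas Series directly:
# lineages = taxonomy.get_lineage_by_id(missing_rank=True)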
|
get_taxonomy_by_id
|
Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
:class:`pandas.DataFrame` with taxonomy data
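Examples
--------
Editor's sketch (hypothetical feature ids; the returned frame contains only the
columns listed in ``avail_ranks``):
>>> taxonomy.get_taxonomy_by_id(["F1", "F2"])  # doctest: +SKIP
>>> taxonomy.get_taxonomy_by_id()  # all features  # doctest: +SKIP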
|
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
        Returns
        -------
        Instance of
            :class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
        :class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
# MASKED: get_taxonomy_by_id function (lines 261-282)
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
            If True, will generate prefixes like `s__` or `d__` for missing ranks.
        desired_ranks
            List of desired ranks to generate.
            If False, all main ranks will be generated.
        drop_ranks
            List of ranks to drop from the desired ranks.
            This parameter is only useful if `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
        :class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
        :class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
            If True, a feature is removed when any of the `ranks` is missing.
            If False, all `ranks` must be missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
        :class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
        :class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
            Generate taxonomy in the given format (currently only `lineage` is supported).
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
            raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
            The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
            # Get the first lineage sample for notation testing, assuming the rest share the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
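            # jRegexGG is expected to yield (rank, taxon) pairs from each greengenes
            # lineage string; ranks missing from a lineage are padded with None so
            # every row covers all of VALID_RANKS.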
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
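                # The feature id is stored under the key None so that
                # set_index(None) below can promote it to the index.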
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
|
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
        if self.xrid.isin(target_ids).sum() == len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
| 261 | 282 |
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
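            # A column is treated as taxonomy-like when its lower-cased name ends
            # (within the last two characters) with one of BIOM_TAXONOMY_NAMES;
            # reversing both strings turns that suffix test into a cheap find() check.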
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
        if self.xrid.isin(target_ids).sum() == len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
            If True, will generate a prefix like `s__` or `d__` for missing ranks.
        desired_ranks
            List of desired ranks to generate.
            If False, all main ranks will be generated.
        drop_ranks
            List of ranks to drop from the desired ranks.
            This parameter is only useful if `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
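        # Fewer matches than requested ids usually means duplicated ids were
        # passed; in that case lineages are generated for the unique ids only.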
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
            If True, removes a feature when any single listed rank is missing.
            If False, all `ranks` must be missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
            raise TypeError("`level` must have str type.")
if level in self.__avail_ranks:
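            # Collapse features that share the same lineage truncated at the
            # requested level; get_rank_upto() is expected to return the ranks
            # spanning up to that level.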
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
            Generate taxonomy in the given format (currently only `lineage` is supported).
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
            raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
            The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
            # Get the first lineage sample for notation testing, assuming the rest share the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
|
drop_features_without_ranks
|
Remove features that do not contain `ranks`.
Parameters
----------
ranks
Ranks to look for
any
    If True, removes a feature when any single listed rank is missing.
    If False, all `ranks` must be missing.
kwargs
Compatibility
|
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
        if self.xrid.isin(target_ids).sum() == len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
            If True, will generate a prefix like `s__` or `d__` for missing ranks.
        desired_ranks
            List of desired ranks to generate.
            If False, all main ranks will be generated.
        drop_ranks
            List of ranks to drop from the desired ranks.
            This parameter is only useful if `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
# MASKED: drop_features_without_ranks function (lines 374-400)
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
            raise TypeError("`level` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
            Generate taxonomy in the given format (currently only `lineage` is supported).
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
            raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
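                    # Strip surrounding square brackets (e.g. "[Clostridium]")
                    # before capitalizing the cleaned taxon name.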
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
            The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
            # Get the first lineage sample for notation testing, assuming the rest share the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
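                # SILVA lineages are plain ";"-separated taxa without rank
                # prefixes, so ranks are assigned positionally from
                # target_order_ranks.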
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
|
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
            If True, removes a feature when any single listed rank is missing.
            If False, all `ranks` must be missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
| 374 | 400 |
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() <= len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True will generate prefix like `s__` or `d__`
desired_ranks
List of desired ranks to generate.
If False then will generate all main ranks
drop_ranks
List of ranks to drop from desired ranks.
            This parameter is only useful if `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
            If True, drop a feature when any of the given `ranks` is missing.
            If False (default), drop a feature only when all of the given `ranks` are missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
            Generate taxonomy in the given format (currently only `lineage` is supported).
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
            raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation require `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
            # Get the first lineage sample for notation testing, assuming the rest share the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
|
merge_features_by_rank
|
Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
|
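Before the masked source below, a hedged usage sketch of the method this record documents. The import path, the constructor input, the rank label "c", and the printed mapper are assumptions made for illustration; they are not output captured from pmaf.

import pandas as pd
from pmaf.biome.essentials import RepTaxonomy  # import path is an assumption

# Hypothetical greengenes-style lineages keyed by feature id.
lineages = pd.Series(
    {
        "F1": "k__Bacteria; p__Firmicutes; c__Bacilli; g__Bacillus",
        "F2": "k__Bacteria; p__Firmicutes; c__Bacilli; g__Listeria",
        "F3": "k__Bacteria; p__Proteobacteria; c__Gammaproteobacteria; g__Vibrio",
    }
)
tax = RepTaxonomy(lineages)

# Collapse features at the class ("c") rank. Once the backbone ratifies the
# action, the return value is expected to be a mapper from new group ids to
# the lists of original feature ids, e.g. {0: ["F1", "F2"], 1: ["F3"]}.
mapper = tax.merge_features_by_rank("c")
print(mapper)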
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() <= len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True will generate prefix like `s__` or `d__`
desired_ranks
List of desired ranks to generate.
If False then will generate all main ranks
drop_ranks
List of ranks to drop from desired ranks.
            This parameter is only useful if `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
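    # Illustrative note (added, not part of the original source): a call such as
    # self.find_features_by_pattern("bacilli") scans the consensus "lineage"
    # column via pandas str.contains, so with the defaults it is a plain,
    # case-insensitive substring match; passing regex=True treats the pattern as
    # a regular expression instead. The result is a numpy array of the matching
    # feature indices.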
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
            If True, drop a feature when any of the given `ranks` is missing.
            If False (default), drop a feature only when all of the given `ranks` are missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
# MASKED: merge_features_by_rank function (lines 425-458)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
            Generate taxonomy in the given format (currently only `lineage` is supported).
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
            raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation require `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
            # Get the first lineage sample for notation testing, assuming the rest share the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
|
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
| 425 | 458 |
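The implementation above reduces to grouping per-feature lineages truncated at the requested rank and re-indexing the groups. The standalone pandas sketch below reproduces only that grouping step on made-up lineages; a plain string truncation stands in for get_rank_upto plus generate_lineages_from_taxa.

import pandas as pd

# Made-up lineages keyed by feature id.
lineages = pd.Series(
    {
        "F1": "k__Bacteria;p__Firmicutes;c__Bacilli;g__Bacillus",
        "F2": "k__Bacteria;p__Firmicutes;c__Bacilli;g__Listeria",
        "F3": "k__Bacteria;p__Proteobacteria;c__Gammaproteobacteria;g__Vibrio",
    }
)

# Truncate each lineage after the third rank (kingdom, phylum, class).
truncated = lineages.str.split(";").str[:3].str.join(";")
groups = truncated.groupby(truncated)

# Re-index the groups the same way the method does: new integer ids map to the
# lists of original feature ids that were merged together.
mapper = {
    new_id: list(feature_ids)
    for new_id, (_, feature_ids) in enumerate(groups.groups.items())
}
print(mapper)  # {0: ['F1', 'F2'], 1: ['F3']}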
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
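    # Illustrative note (added, not part of the original source): a hypothetical
    # call such as RepTaxonomy.from_biom("table.biom") goes through __load_biom
    # below, which requires the BIOM table to carry observation metadata and
    # raises ValueError otherwise. If exactly one metadata column name matches an
    # entry in BIOM_TAXONOMY_NAMES that column is used as the taxonomy; otherwise
    # the whole observation-metadata frame is passed on. The filename here is a
    # placeholder, not a file shipped with pmaf.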
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() <= len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True will generate prefix like `s__` or `d__`
desired_ranks
List of desired ranks to generate.
If False then will generate all main ranks
drop_ranks
List of ranks to drop from desired ranks.
            This parameter is only useful if `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
            If True, drop a feature when any of the given `ranks` is missing.
            If False (default), drop a feature only when all of the given `ranks` are missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
            Generate taxonomy in the given format (currently only `lineage` is supported).
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
            raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.DataFrame"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
# Get the first lineage sample for notation testing, assuming the rest follow the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
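# Minimal usage sketch for the class above, assuming greengenes-style lineages and the
# public import path; the example data below is illustrative, not from the original module:
#
#     import pandas as pd
#     from pmaf.biome.essentials import RepTaxonomy  # import path assumed
#
#     lineages = pd.Series(
#         {
#             "F1": "k__Bacteria; p__Firmicutes; c__Bacilli",
#             "F2": "k__Bacteria; p__Proteobacteria",
#         }
#     )
#     tax = RepTaxonomy(lineages)
#     tax.avail_ranks                           # ranks actually present in the data
#     tax.get_lineage_by_id(missing_rank=True)  # consensus lineages with rank prefixes
#     tax.find_features_without_taxa()          # ids with no taxon at any rank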
|
get_subset
|
Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
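Examples
--------
A minimal, hypothetical sketch; the instance name and the feature ids are
illustrative placeholders, not values from the original documentation:
>>> sub = taxonomy.get_subset(rids=["F1", "F2"])  # doctest: +SKIP
>>> sorted(sub.xrid)  # doctest: +SKIP
['F1', 'F2']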
|
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` DataFrame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` DataFrame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
filepath:
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature ids and ratify the action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True, will generate prefixes like `s__` or `d__` for missing ranks
desired_ranks
List of desired ranks to generate.
If False, all main ranks will be generated
drop_ranks
List of ranks to drop from the desired ranks.
This parameter is only useful if `missing_rank` is True
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that match `pattern_str`.
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
If True, removes features with at least one missing rank.
If False, all `ranks` must be missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`level` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid ranks are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
# MASKED: get_subset function (lines 474-502)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
Generate taxonomy in the given format (currently only `lineage` is supported).
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.DataFrame"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
# Get the first lineage sample for notation testing, assuming the rest follow the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
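# Illustration of the lineage notations handled above (strings are illustrative; the
# exact prefixes accepted depend on jRegexGG / jRegexQIIME):
#   greengenes: "k__Bacteria; p__Firmicutes; c__Bacilli" -> one column per rank prefix
#   qiime:      "D_0__Bacteria;D_1__Firmicutes"          -> sorted rank labels mapped onto `target_order_ranks`
#   silva:      "Bacteria;Firmicutes;Bacilli"            -> ranks assigned positionally from `order_ranks`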
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
|
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
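# Note on the implementation above: requested ids are cast to the dtype of the internal
# index before validation, every requested id must already be present (otherwise a
# ValueError is raised), and the subset is rebuilt from the stored "lineage" strings,
# so `metadata` and `name` are carried over unchanged.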
| 474 | 502 |
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` DataFrame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` DataFrame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
filepath:
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature ids and ratify the action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True, will generate prefixes like `s__` or `d__` for missing ranks
desired_ranks
List of desired ranks to generate.
If False, all main ranks will be generated
drop_ranks
List of ranks to drop from the desired ranks.
This parameter is only useful if `missing_rank` is True
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that match `pattern_str`.
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
If True, removes features with at least one missing rank.
If False, all `ranks` must be missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`level` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid ranks are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
Generate taxonomy in the given format (currently only `lineage` is supported).
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
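# Usage sketch for export(), assuming an already constructed instance `tax`; the output
# paths below are illustrative placeholders:
#
#     tax.export("lineages_out", _add_ext=True, sep="\t")  # writes "lineages_out.csv"
#     tax.export("lineages_out.tsv", sep="\t")             # writes to the path as given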
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.DataFrame"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
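# The initializer above accepts either input shape (examples are illustrative):
#   pd.Series    -> one lineage string per feature id, e.g. "k__Bacteria; p__Firmicutes"
#   pd.DataFrame -> a single lineage column, or one column per rank listed in VALID_RANKS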
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
# Get the first lineage sample for notation testing, assuming the rest follow the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
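# Minimal cleanup sketch for the class above, assuming an already constructed instance
# `tax`; the rank label "c" is an illustrative assumption:
#
#     tax.drop_features_without_taxa()   # drop ids with no taxon at any rank
#     tax.merge_duplicated_features()    # collapse ids that share an identical lineage
#     tax.merge_features_by_rank("c")    # aggregate features at the chosen rank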
|
export
|
Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
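Examples
--------
A minimal, hypothetical sketch; the instance name and the output path are
illustrative placeholders:
>>> taxonomy.export("lineages-export", _add_ext=True, sep="\t")  # doctest: +SKIP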
|
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` DataFrame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` DataFrame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
filepath:
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() <= len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True will generate prefix like `s__` or `d__`
desired_ranks
List of desired ranks to generate.
If False then will generate all main ranks
drop_ranks
List of ranks to drop from desired ranks.
This parameter is only useful if `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
If True, remove features that are missing any of the given `ranks`.
If False, remove only features that are missing all of the given `ranks`.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
Format in which to generate the taxonomy (currently only `lineage` is supported).
ascending
Sort the exported lineages in ascending order.
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
raise NotImplementedError
# MASKED: export function (lines 526-553)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
# Get the first lineage sample for notation testing, assuming the rest share the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
|
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
| 526 | 553 |
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
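# Usage sketch for the factory above (not part of the original module; the
# file name and column label are hypothetical assumptions):
#
#     tax = RepTaxonomy.from_csv("taxonomy.csv", taxonomy_columns="Taxon")
#     print(tax.avail_ranks)
#
# Note that the same **kwargs are forwarded both to pandas.read_csv and to the
# constructor, so only options accepted by both should be passed this way.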
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
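# Hedged example for the BIOM loader above; the path is hypothetical. The
# .biom table must carry observation metadata with a taxonomy-like column,
# otherwise __load_biom raises ValueError.
#
#     tax = RepTaxonomy.from_biom("feature-table-with-taxonomy.biom")
#     tax.data.head()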
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() <= len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True will generate prefix like `s__` or `d__`
desired_ranks
List of desired ranks to generate.
If False then will generate all main ranks
drop_ranks
List of ranks to drop from desired ranks.
This parameter is only useful if `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
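# Illustrative call; the search term is arbitrary. The result is a numpy array
# of feature indices whose consensus lineage contains the pattern.
#
#     hits = tax.find_features_by_pattern("bacteroid")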
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
If True, remove features that are missing any of the given `ranks`.
If False, remove only features that are missing all of the given `ranks`.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
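# Sketch of the merge above (assumed instance `tax`): features sharing an
# identical lineage are collapsed, and the mapping (when the ratified action
# yields one) links each new group index to the original feature ids.
#
#     dup_map = tax.merge_duplicated_features()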
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
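# Hedged example: the rank label must be present in `avail_ranks`; the exact
# labels come from VALID_RANKS, so the value below is only an assumption.
#
#     if "g" in tax.avail_ranks:
#         genus_map = tax.merge_features_by_rank("g")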
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
Format in which to generate the taxonomy (currently only `lineage` is supported).
ascending
Sort the exported lineages in ascending order.
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
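# Assumed export example; the output path is hypothetical. Extra keyword
# arguments travel through _export to get_lineage_by_id.
#
#     tax.export("lineages.csv", missing_rank=True)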
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
# Get the first lineage sample for notation testing, assuming the rest share the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
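# End-to-end sketch of a typical RepTaxonomy session (file names and options
# are illustrative assumptions, not taken from the library's documentation):
#
#     tax = RepTaxonomy.from_csv("taxonomy.csv", taxonomy_columns="Taxon")
#     tax.drop_features_without_taxa()
#     dup_map = tax.merge_duplicated_features()
#     tax.export("clean-lineages.csv")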
|
__init_internal_taxonomy
|
Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
|
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() <= len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True will generate prefix like `s__` or `d__`
desired_ranks
List of desired ranks to generate.
If False then will generate all main ranks
drop_ranks
List of ranks to drop from desired ranks.
This parameter is only useful if `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
If True, remove features that are missing any of the given `ranks`.
If False, remove only features that are missing all of the given `ranks`.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
Format in which to generate the taxonomy (currently only `lineage` is supported).
ascending
Sort the exported lineages in ascending order.
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
# MASKED: __init_internal_taxonomy function (lines 590-645)
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
# Get the first lineage sample for notation testing, assuming the rest share the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
|
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.DataFrame"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
| 590 | 645 |
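A minimal usage sketch for this initializer (all data below is hypothetical; it assumes `pmaf` is installed, that `RepTaxonomy` is importable from `pmaf.biome.essentials`, and that `jRegexGG` expects greengenes-style rank prefixes such as `k__`/`p__`). The constructor routes a lineage Series through `__init_internal_taxonomy` with the default 'greengenes' notation:
import pandas as pd
from pmaf.biome.essentials import RepTaxonomy  # import path assumed
# Two features with greengenes-style lineages.
lineages = pd.Series(
    {
        "OTU1": "k__Bacteria; p__Firmicutes; c__Bacilli",
        "OTU2": "k__Bacteria; p__Proteobacteria",
    }
)
tax = RepTaxonomy(lineages)
print(tax.avail_ranks)      # ranks with at least one non-missing taxon
print(tax.data["lineage"])  # reconstructed internal lineages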
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` DataFrame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` DataFrame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
Returns
-------
Instance of
:class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
:class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
:class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() <= len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True, generate prefixes like `s__` or `d__` for missing ranks
desired_ranks
List of desired ranks to generate.
If False, all main ranks will be generated.
drop_ranks
List of ranks to drop from the desired ranks.
This parameter is only useful if `missing_rank` is True
kwargs
Compatibility.
Returns
-------
:class:`pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
:class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
If True, remove features where any of the given `ranks` is missing.
If False, remove features only when all of the given `ranks` are missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`level` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank is provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
:class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
:class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
Format in which to generate the taxonomy (currently only `lineage` is supported).
ascending
Sort order for the exported lineages.
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Whether to append the `.csv` file extension to `output_fp`.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.DataFrame"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
# Get the first lineage sample for notation testing, assuming the rest share the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
|
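Before moving to the next record, a short sketch of the file-based entry point defined above (the file name and column name are placeholders; the import path is assumed):
from pmaf.biome.essentials import RepTaxonomy  # import path assumed
# Build the object from a CSV whose "taxonomy" column holds lineage strings.
tax = RepTaxonomy.from_csv("taxonomy.csv", taxonomy_columns="taxonomy")
# Drop features with no taxonomy at all, then fetch consensus lineages.
tax.drop_features_without_taxa()
lineages = tax.get_lineage_by_id(missing_rank=True)
print(lineages.head())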
get_share
|
Represents a share on the Data Box Edge/Gateway device.
:param str device_name: The device name.
:param str name: The share name.
:param str resource_group_name: The resource group name.
|
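A hedged invocation sketch for this function (the device, share, and resource group names are placeholders; the Python module path is assumed from the provider/version string used by the generated code):
import pulumi
from pulumi_azure_native.databoxedge.v20201201 import get_share  # module path assumed
share = get_share(
    device_name="example-device",
    name="example-share",
    resource_group_name="example-rg",
)
pulumi.export("shareStatus", share.share_status)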
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetShareResult',
'AwaitableGetShareResult',
'get_share',
]
@pulumi.output_type
class GetShareResult:
"""
Represents a share on the Data Box Edge/Gateway device.
"""
def __init__(__self__, access_protocol=None, azure_container_info=None, client_access_rights=None, data_policy=None, description=None, id=None, monitoring_status=None, name=None, refresh_details=None, share_mappings=None, share_status=None, system_data=None, type=None, user_access_rights=None):
if access_protocol and not isinstance(access_protocol, str):
raise TypeError("Expected argument 'access_protocol' to be a str")
pulumi.set(__self__, "access_protocol", access_protocol)
if azure_container_info and not isinstance(azure_container_info, dict):
raise TypeError("Expected argument 'azure_container_info' to be a dict")
pulumi.set(__self__, "azure_container_info", azure_container_info)
if client_access_rights and not isinstance(client_access_rights, list):
raise TypeError("Expected argument 'client_access_rights' to be a list")
pulumi.set(__self__, "client_access_rights", client_access_rights)
if data_policy and not isinstance(data_policy, str):
raise TypeError("Expected argument 'data_policy' to be a str")
pulumi.set(__self__, "data_policy", data_policy)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if monitoring_status and not isinstance(monitoring_status, str):
raise TypeError("Expected argument 'monitoring_status' to be a str")
pulumi.set(__self__, "monitoring_status", monitoring_status)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if refresh_details and not isinstance(refresh_details, dict):
raise TypeError("Expected argument 'refresh_details' to be a dict")
pulumi.set(__self__, "refresh_details", refresh_details)
if share_mappings and not isinstance(share_mappings, list):
raise TypeError("Expected argument 'share_mappings' to be a list")
pulumi.set(__self__, "share_mappings", share_mappings)
if share_status and not isinstance(share_status, str):
raise TypeError("Expected argument 'share_status' to be a str")
pulumi.set(__self__, "share_status", share_status)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if user_access_rights and not isinstance(user_access_rights, list):
raise TypeError("Expected argument 'user_access_rights' to be a list")
pulumi.set(__self__, "user_access_rights", user_access_rights)
@property
@pulumi.getter(name="accessProtocol")
def access_protocol(self) -> str:
"""
Access protocol to be used by the share.
"""
return pulumi.get(self, "access_protocol")
@property
@pulumi.getter(name="azureContainerInfo")
def azure_container_info(self) -> Optional['outputs.AzureContainerInfoResponse']:
"""
Azure container mapping for the share.
"""
return pulumi.get(self, "azure_container_info")
@property
@pulumi.getter(name="clientAccessRights")
def client_access_rights(self) -> Optional[Sequence['outputs.ClientAccessRightResponse']]:
"""
List of IP addresses and corresponding access rights on the share (required for NFS protocol).
"""
return pulumi.get(self, "client_access_rights")
@property
@pulumi.getter(name="dataPolicy")
def data_policy(self) -> Optional[str]:
"""
Data policy of the share.
"""
return pulumi.get(self, "data_policy")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description for the share.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
The path ID that uniquely identifies the object.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> str:
"""
Current monitoring status of the share.
"""
return pulumi.get(self, "monitoring_status")
@property
@pulumi.getter
def name(self) -> str:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="refreshDetails")
def refresh_details(self) -> Optional['outputs.RefreshDetailsResponse']:
"""
Details of the refresh job on this share.
"""
return pulumi.get(self, "refresh_details")
@property
@pulumi.getter(name="shareMappings")
def share_mappings(self) -> Sequence['outputs.MountPointMapResponse']:
"""
Share mount point to the role.
"""
return pulumi.get(self, "share_mappings")
@property
@pulumi.getter(name="shareStatus")
def share_status(self) -> str:
"""
Current status of the share.
"""
return pulumi.get(self, "share_status")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Share on ASE device
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userAccessRights")
def user_access_rights(self) -> Optional[Sequence['outputs.UserAccessRightResponse']]:
"""
Mapping of users and corresponding access rights on the share (required for SMB protocol).
"""
return pulumi.get(self, "user_access_rights")
class AwaitableGetShareResult(GetShareResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetShareResult(
access_protocol=self.access_protocol,
azure_container_info=self.azure_container_info,
client_access_rights=self.client_access_rights,
data_policy=self.data_policy,
description=self.description,
id=self.id,
monitoring_status=self.monitoring_status,
name=self.name,
refresh_details=self.refresh_details,
share_mappings=self.share_mappings,
share_status=self.share_status,
system_data=self.system_data,
type=self.type,
user_access_rights=self.user_access_rights)
# MASKED: get_share function (lines 202-238)
|
def get_share(device_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetShareResult:
"""
Represents a share on the Data Box Edge/Gateway device.
:param str device_name: The device name.
:param str name: The share name.
:param str resource_group_name: The resource group name.
"""
__args__ = dict()
__args__['deviceName'] = device_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:databoxedge/v20201201:getShare', __args__, opts=opts, typ=GetShareResult).value
return AwaitableGetShareResult(
access_protocol=__ret__.access_protocol,
azure_container_info=__ret__.azure_container_info,
client_access_rights=__ret__.client_access_rights,
data_policy=__ret__.data_policy,
description=__ret__.description,
id=__ret__.id,
monitoring_status=__ret__.monitoring_status,
name=__ret__.name,
refresh_details=__ret__.refresh_details,
share_mappings=__ret__.share_mappings,
share_status=__ret__.share_status,
system_data=__ret__.system_data,
type=__ret__.type,
user_access_rights=__ret__.user_access_rights)
| 202 | 238 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetShareResult',
'AwaitableGetShareResult',
'get_share',
]
@pulumi.output_type
class GetShareResult:
"""
Represents a share on the Data Box Edge/Gateway device.
"""
def __init__(__self__, access_protocol=None, azure_container_info=None, client_access_rights=None, data_policy=None, description=None, id=None, monitoring_status=None, name=None, refresh_details=None, share_mappings=None, share_status=None, system_data=None, type=None, user_access_rights=None):
if access_protocol and not isinstance(access_protocol, str):
raise TypeError("Expected argument 'access_protocol' to be a str")
pulumi.set(__self__, "access_protocol", access_protocol)
if azure_container_info and not isinstance(azure_container_info, dict):
raise TypeError("Expected argument 'azure_container_info' to be a dict")
pulumi.set(__self__, "azure_container_info", azure_container_info)
if client_access_rights and not isinstance(client_access_rights, list):
raise TypeError("Expected argument 'client_access_rights' to be a list")
pulumi.set(__self__, "client_access_rights", client_access_rights)
if data_policy and not isinstance(data_policy, str):
raise TypeError("Expected argument 'data_policy' to be a str")
pulumi.set(__self__, "data_policy", data_policy)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if monitoring_status and not isinstance(monitoring_status, str):
raise TypeError("Expected argument 'monitoring_status' to be a str")
pulumi.set(__self__, "monitoring_status", monitoring_status)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if refresh_details and not isinstance(refresh_details, dict):
raise TypeError("Expected argument 'refresh_details' to be a dict")
pulumi.set(__self__, "refresh_details", refresh_details)
if share_mappings and not isinstance(share_mappings, list):
raise TypeError("Expected argument 'share_mappings' to be a list")
pulumi.set(__self__, "share_mappings", share_mappings)
if share_status and not isinstance(share_status, str):
raise TypeError("Expected argument 'share_status' to be a str")
pulumi.set(__self__, "share_status", share_status)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if user_access_rights and not isinstance(user_access_rights, list):
raise TypeError("Expected argument 'user_access_rights' to be a list")
pulumi.set(__self__, "user_access_rights", user_access_rights)
@property
@pulumi.getter(name="accessProtocol")
def access_protocol(self) -> str:
"""
Access protocol to be used by the share.
"""
return pulumi.get(self, "access_protocol")
@property
@pulumi.getter(name="azureContainerInfo")
def azure_container_info(self) -> Optional['outputs.AzureContainerInfoResponse']:
"""
Azure container mapping for the share.
"""
return pulumi.get(self, "azure_container_info")
@property
@pulumi.getter(name="clientAccessRights")
def client_access_rights(self) -> Optional[Sequence['outputs.ClientAccessRightResponse']]:
"""
List of IP addresses and corresponding access rights on the share (required for NFS protocol).
"""
return pulumi.get(self, "client_access_rights")
@property
@pulumi.getter(name="dataPolicy")
def data_policy(self) -> Optional[str]:
"""
Data policy of the share.
"""
return pulumi.get(self, "data_policy")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description for the share.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
The path ID that uniquely identifies the object.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> str:
"""
Current monitoring status of the share.
"""
return pulumi.get(self, "monitoring_status")
@property
@pulumi.getter
def name(self) -> str:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="refreshDetails")
def refresh_details(self) -> Optional['outputs.RefreshDetailsResponse']:
"""
Details of the refresh job on this share.
"""
return pulumi.get(self, "refresh_details")
@property
@pulumi.getter(name="shareMappings")
def share_mappings(self) -> Sequence['outputs.MountPointMapResponse']:
"""
Share mount point to the role.
"""
return pulumi.get(self, "share_mappings")
@property
@pulumi.getter(name="shareStatus")
def share_status(self) -> str:
"""
Current status of the share.
"""
return pulumi.get(self, "share_status")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Share on ASE device
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userAccessRights")
def user_access_rights(self) -> Optional[Sequence['outputs.UserAccessRightResponse']]:
"""
Mapping of users and corresponding access rights on the share (required for SMB protocol).
"""
return pulumi.get(self, "user_access_rights")
class AwaitableGetShareResult(GetShareResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetShareResult(
access_protocol=self.access_protocol,
azure_container_info=self.azure_container_info,
client_access_rights=self.client_access_rights,
data_policy=self.data_policy,
description=self.description,
id=self.id,
monitoring_status=self.monitoring_status,
name=self.name,
refresh_details=self.refresh_details,
share_mappings=self.share_mappings,
share_status=self.share_status,
system_data=self.system_data,
type=self.type,
user_access_rights=self.user_access_rights)
def get_share(device_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetShareResult:
"""
Represents a share on the Data Box Edge/Gateway device.
:param str device_name: The device name.
:param str name: The share name.
:param str resource_group_name: The resource group name.
"""
__args__ = dict()
__args__['deviceName'] = device_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:databoxedge/v20201201:getShare', __args__, opts=opts, typ=GetShareResult).value
return AwaitableGetShareResult(
access_protocol=__ret__.access_protocol,
azure_container_info=__ret__.azure_container_info,
client_access_rights=__ret__.client_access_rights,
data_policy=__ret__.data_policy,
description=__ret__.description,
id=__ret__.id,
monitoring_status=__ret__.monitoring_status,
name=__ret__.name,
refresh_details=__ret__.refresh_details,
share_mappings=__ret__.share_mappings,
share_status=__ret__.share_status,
system_data=__ret__.system_data,
type=__ret__.type,
user_access_rights=__ret__.user_access_rights)
|
get
|
Get an existing DataCollectionRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
|
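A hedged sketch of rehydrating an existing rule by its ARM ID (the resource ID below is a placeholder; the Python module path is assumed):
import pulumi
from pulumi_azure_native.insights.v20191101preview import DataCollectionRule  # module path assumed
existing_rule = DataCollectionRule.get(
    "imported-dcr",
    id="/subscriptions/<subscription-id>/resourceGroups/example-rg/providers/Microsoft.Insights/dataCollectionRules/example-dcr",
)
pulumi.export("dcrLocation", existing_rule.location)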
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['DataCollectionRule']
class DataCollectionRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
data_collection_rule_name: Optional[pulumi.Input[str]] = None,
data_flows: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]]] = None,
data_sources: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
destinations: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Definition of ARM tracked top level resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] data_collection_rule_name: The name of the data collection rule. The name is case insensitive.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]] data_flows: The specification of data flows.
:param pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']] data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
:param pulumi.Input[str] description: Description of the data collection rule.
:param pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']] destinations: The specification of destinations.
:param pulumi.Input[str] location: The geo-location where the resource lives.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['data_collection_rule_name'] = data_collection_rule_name
if data_flows is None and not opts.urn:
raise TypeError("Missing required property 'data_flows'")
__props__['data_flows'] = data_flows
__props__['data_sources'] = data_sources
__props__['description'] = description
if destinations is None and not opts.urn:
raise TypeError("Missing required property 'destinations'")
__props__['destinations'] = destinations
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['etag'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights/v20191101preview:DataCollectionRule"), pulumi.Alias(type_="azure-native:insights:DataCollectionRule"), pulumi.Alias(type_="azure-nextgen:insights:DataCollectionRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataCollectionRule, __self__).__init__(
'azure-native:insights/v20191101preview:DataCollectionRule',
resource_name,
__props__,
opts)
# MASKED: get function (lines 90-116)
@property
@pulumi.getter(name="dataFlows")
def data_flows(self) -> pulumi.Output[Sequence['outputs.DataFlowResponse']]:
"""
The specification of data flows.
"""
return pulumi.get(self, "data_flows")
@property
@pulumi.getter(name="dataSources")
def data_sources(self) -> pulumi.Output[Optional['outputs.DataCollectionRuleResponseDataSources']]:
"""
The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
"""
return pulumi.get(self, "data_sources")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the data collection rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def destinations(self) -> pulumi.Output['outputs.DataCollectionRuleResponseDestinations']:
"""
The specification of destinations.
"""
return pulumi.get(self, "destinations")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Resource entity tag (ETag).
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The resource provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DataCollectionRule':
"""
Get an existing DataCollectionRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["data_flows"] = None
__props__["data_sources"] = None
__props__["description"] = None
__props__["destinations"] = None
__props__["etag"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["tags"] = None
__props__["type"] = None
return DataCollectionRule(resource_name, opts=opts, __props__=__props__)
| 90 | 116 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['DataCollectionRule']
class DataCollectionRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
data_collection_rule_name: Optional[pulumi.Input[str]] = None,
data_flows: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]]] = None,
data_sources: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
destinations: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Definition of ARM tracked top level resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] data_collection_rule_name: The name of the data collection rule. The name is case insensitive.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]] data_flows: The specification of data flows.
:param pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']] data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
:param pulumi.Input[str] description: Description of the data collection rule.
:param pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']] destinations: The specification of destinations.
:param pulumi.Input[str] location: The geo-location where the resource lives.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['data_collection_rule_name'] = data_collection_rule_name
if data_flows is None and not opts.urn:
raise TypeError("Missing required property 'data_flows'")
__props__['data_flows'] = data_flows
__props__['data_sources'] = data_sources
__props__['description'] = description
if destinations is None and not opts.urn:
raise TypeError("Missing required property 'destinations'")
__props__['destinations'] = destinations
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['etag'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights/v20191101preview:DataCollectionRule"), pulumi.Alias(type_="azure-native:insights:DataCollectionRule"), pulumi.Alias(type_="azure-nextgen:insights:DataCollectionRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataCollectionRule, __self__).__init__(
'azure-native:insights/v20191101preview:DataCollectionRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DataCollectionRule':
"""
Get an existing DataCollectionRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["data_flows"] = None
__props__["data_sources"] = None
__props__["description"] = None
__props__["destinations"] = None
__props__["etag"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["tags"] = None
__props__["type"] = None
return DataCollectionRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dataFlows")
def data_flows(self) -> pulumi.Output[Sequence['outputs.DataFlowResponse']]:
"""
The specification of data flows.
"""
return pulumi.get(self, "data_flows")
@property
@pulumi.getter(name="dataSources")
def data_sources(self) -> pulumi.Output[Optional['outputs.DataCollectionRuleResponseDataSources']]:
"""
The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
"""
return pulumi.get(self, "data_sources")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the data collection rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def destinations(self) -> pulumi.Output['outputs.DataCollectionRuleResponseDestinations']:
"""
The specification of destinations.
"""
return pulumi.get(self, "destinations")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Resource entity tag (ETag).
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The resource provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
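# Illustrative usage sketch (not part of the generated SDK; the resource id below is a
# placeholder): an existing rule can be looked up with the static get() helper defined
# above and its outputs exported.
#
#   existing = DataCollectionRule.get(
#       "existing-dcr",
#       id="/subscriptions/<subscription-id>/resourceGroups/<rg>/providers/"
#          "Microsoft.Insights/dataCollectionRules/<rule-name>")
#   pulumi.export("dcr_etag", existing.etag)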
|
get_zones
|
Method to fetch the availability zones for the given region, mapped to the subnet
they should use. Subnets are looked up in dest_vpc_id when provided, otherwise in the
region's YugaByte VPC.
Args:
    region (str): Name of region to get zones of.
    dest_vpc_id (str): Optional id of the VPC to look up subnets in.
Returns:
    zones (obj): Map of zone -> subnet id (or None when no subnet is found)
|
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
        # TODO: handle private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
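# Illustrative behaviour of the helper above (values derived from the regex):
#   dumb_camel_to_snake("GatewayId")              -> "gateway_id"
#   dumb_camel_to_snake("VpcPeeringConnectionId") -> "vpc_peering_connection_id"
# add_route_to_rt() relies on this to map the boto3 keyword target (e.g. "GatewayId")
# onto the snake_case attribute exposed on an ec2.Route resource.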
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
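# Illustrative shape of the per_region_meta dict consumed by
# YbVpcComponents.from_user_json() above (keys taken from the lookups in that method;
# all ids are placeholders):
#   {
#       "vpcId": "vpc-0abc...",
#       "customSecurityGroupId": "sg-0abc...,sg-0def...",
#       "azToSubnetIds": {"us-west-2a": "subnet-0abc..."}
#   }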
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
            raise YBOpsRuntimeError("Must provide a region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
                    raise YBOpsRuntimeError(
                        "Expected one peering connection, got {}".format(peering))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
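# Illustrative sketch (not part of the original module): applying the decorator above
# to a hypothetical helper, so that calls failing with "Request limit exceeded" are
# retried by request_retry_decorator instead of bubbling up immediately. The helper
# name and its use of get_client() are assumptions for the example only.
@aws_request_limit_retry
def _example_describe_instance_state(region, instance_id):
    # Fetch the state dict of a single instance via the region's ec2 resource.
    return get_client(region).Instance(instance_id).state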
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
    """Method to get boto3 ec2 resources for the given regions.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
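# Example call (illustrative; region, zone and instance type are assumptions):
#   get_spot_pricing("us-west-2", "us-west-2a", "c5.xlarge")
# returns the most recent SpotPrice for that zone/instance type as a string, or raises
# YBOpsRuntimeError when no spot price history exists for the combination.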
# MASKED: get_zones function (lines 363-389)
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
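# Illustrative shape of a single entry in `rules`, as consumed by create_security_group()
# above and as produced by add_cidr_to_rules() (values are placeholders):
#   {"ip_protocol": "tcp", "from_port": 22, "to_port": 22, "cidr_ip": "0.0.0.0/0"}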
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
        internet gateway: newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
        # If we have an existing igw for the region, let's just tag it with yb-XX-igw.
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
    # If we don't have an internet gateway, let's create one and attach it to the vpc.
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
        subnets (subnet object map): Map of Subnet objects keyed by zone.
    Returns:
        json (dict): A json-serializable dict for yugaware to consume with the necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
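# Illustrative return value of vpc_components_as_json() (all ids are placeholders):
#   {
#       "vpc_id": "vpc-0abc...",
#       "security_group": [{"id": "sg-0abc...", "name": "yb-us-west-2-sg"}],
#       "zones": {"us-west-2a": "subnet-0abc...", "us-west-2b": "subnet-0def..."}
#   }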
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
        host_vpc (Host VPC object): Can be None as well; when provided, only peerings
                                    against that specific vpc are returned.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
    """Method to create a vpc peering between the newly created VPC and the caller's host VPC.
    The peering request is made from the host side and then accepted from the target region.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
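# Illustrative outputs, derived from the nvme/ephemeral logic above:
#   get_device_names("c5.large", 2)    -> ["xvdb", "xvdc"]
#   get_device_names("i3.2xlarge", 2)  -> ["nvme0n1", "nvme1n1"]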
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Lets assume they have provided security group id comma delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
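# Illustrative: get_yb_sg_name("us-west-2") -> "yb-us-west-2-sg", matching the
# SG_YUGABYTE_PREFIX_FORMAT constant used when the SG is created at bootstrap time.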
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
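# Illustrative usage of the Route53 helpers above (hosted zone id, prefix and IPs are
# placeholders): this creates A records named <prefix>.<hosted-zone-name> with a 5 second
# TTL; edit_dns_record_set() and delete_dns_record_set() issue UPSERT/DELETE change
# batches for the same record set.
#   create_dns_record_set("Z0123456789ABC", "my-universe", ["10.0.1.5", "10.0.2.5"])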
|
def get_zones(region, dest_vpc_id=None):
    """Method to fetch the availability zones for the given region, mapped to the subnet
    they should use. Subnets are looked up in dest_vpc_id when provided, otherwise in the
    region's YugaByte VPC.
    Args:
        region (str): Name of region to get zones of.
        dest_vpc_id (str): Optional id of the VPC to look up subnets in.
    Returns:
        zones (obj): Map of zone -> subnet id (or None when no subnet is found)
    """
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
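# Illustrative result (zone names and subnet id are placeholders); zones without a
# matching subnet map to None:
#   get_zones("us-west-2") -> {"us-west-2a": "subnet-0abc...", "us-west-2b": None}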
| 363 | 389 |
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
        # TODO: handle private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
            raise YBOpsRuntimeError("Must provide a region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
                    raise YBOpsRuntimeError(
                        "Expected one peering connection, got {}".format(peering))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
    """Method to get boto3 ec2 resources for the given regions.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
    """Method to fetch the availability zones for the given region, mapped to the subnet
    they should use. Subnets are looked up in dest_vpc_id when provided, otherwise in the
    region's YugaByte VPC.
    Args:
        region (str): Name of region to get zones of.
        dest_vpc_id (str): Optional id of the VPC to look up subnets in.
    Returns:
        zones (obj): Map of zone -> subnet id (or None when no subnet is found)
    """
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
        internet gateway: newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
        # If we have an existing igw for the region, let's just tag it with yb-XX-igw.
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
    # If we don't have an internet gateway, let's create one and attach it to the vpc.
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
subnets (subnet object map): Map of Subnet objects keyed by zone.
Returns:
json (str): A JSON string for YugaWare to consume with the necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
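# Illustrative note (editor's sketch, not part of the original module): for a VPC with two
# subnets, vpc_components_as_json returns a dict shaped roughly like the following; the ids
# below are hypothetical placeholders.
#
#   {
#       "vpc_id": "vpc-0abc",
#       "security_group": [{"id": "sg-0def", "name": "yb-us-west-2-sg"}],
#       "zones": {"us-west-2a": "subnet-0123", "us-west-2b": "subnet-4567"}
#   }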
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc
peering is done.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
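# Illustrative note (editor's sketch, not part of the original module): NVMe-backed i3
# instances enumerate as nvme0n1, nvme1n1, ...; other instance types enumerate as xvdb,
# xvdc, ... For example:
#
#   get_device_names("i3.2xlarge", 2)  -> ["nvme0n1", "nvme1n1"]
#   get_device_names("c5.large", 2)    -> ["xvdb", "xvdc"]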
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Let's assume they have provided security group ids, comma delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
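# Illustrative usage (editor's sketch, not part of the original module): creating an A record
# set for a set of node IPs; the hosted zone id, domain prefix and IPs below are hypothetical.
#
#   create_dns_record_set("Z0HYPOTHETICAL", "mycluster", ["10.0.0.1", "10.0.0.2"])
#
# This submits a CREATE change batch for "mycluster.<hosted zone name>" with TTL 5 and then
# waits until Route53 reports the change as in sync.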
|
create_subnet
|
Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
|
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
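# Illustrative note (editor's sketch, not part of the original module): for a region such as
# "us-west-2" these formats yield a VPC tag of "yb-us-west-2", per-zone subnet tags such as
# "yb-us-west-2a", plus "yb-us-west-2-igw", "yb-us-west-2-rt" and "yb-us-west-2-sg".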
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
# TODO: handle private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
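# Illustrative usage (editor's sketch, not part of the original module): pointing the default
# route of a route table at an internet gateway. The route is created if the CIDR is absent,
# or its target replaced if it currently points elsewhere.
#
#   add_route_to_rt(route_table, "0.0.0.0/0", "GatewayId", igw.id)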
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
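# Illustrative note (editor's sketch, not part of the original module): this converts
# boto-style attribute names, e.g. "GatewayId" -> "gateway_id" and
# "VpcPeeringConnectionId" -> "vpc_peering_connection_id", which is how add_route_to_rt
# compares an existing route's target attribute against the requested target id.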
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provide a region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
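# Illustrative usage (editor's sketch, not part of the original module): wrapping a boto call
# so that "Request limit exceeded" failures are retried with jitter; the function below is
# hypothetical.
#
#   @aws_request_limit_retry
#   def tag_instance(region, instance_id, key, value):
#       tag_resource(get_client(region), instance_id, key, value)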
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
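# Illustrative note (editor's sketch, not part of the original module): the returned map is
# keyed by zone name and valued by a subnet id or None, e.g. (hypothetical ids)
#
#   {"us-west-2a": "subnet-0123", "us-west-2b": None}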
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
# MASKED: create_subnet function (lines 416-433)
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
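# Illustrative note (editor's sketch, not part of the original module): the name of the
# @get_or_create(get_security_group) decorator (imported from ybops.utils) and the pattern
# used throughout this module suggest that calling create_security_group first looks up an
# existing SG with the same keyword arguments and only creates (and authorizes ingress on)
# a new one when no match is found in the target VPC.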
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
internet gateway: the newly created or existing (now tagged) internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
# If we have an existing IGW for the region, let's just tag it with yb-XX-igw
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
# If we don't have an internet gateway, let's create one and attach it to the VPC
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
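# Illustrative note (editor's sketch, not part of the original module): the returned structure
# is {"per_vpc_info": {<vpc_id>: {"subnets_by_zone": {...}, "security_groups": [...]}}}. Note
# that subnets_by_zone is built once for the region and the same dict is shared by every
# per-VPC entry.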
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
subnets (subnet object map): Map of Subnet objects keyed by zone.
Returns:
json (str): A JSON string for YugaWare to consume with the necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc
peering is done.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Let's assume they have provided security group ids, comma delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
|
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
| 416 | 433 |
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
# TODO: handle private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provide a region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
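# Example of the mapping returned by get_zones (subnet ids are hypothetical);
# zones without a matching subnet map to None:
#   {"us-west-2a": "subnet-0123456789abcdef0", "us-west-2b": None}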
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (list): List of rules to add to the security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
internet gateway: newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
# If we have an existing IGW for the region, let's just tag it with yb-XX-igw.
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
# If we don't have an internet gateway, let's create one and attach it to the VPC.
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
region info (obj): Map containing "per_vpc_info", keyed by VPC id, with subnets by zone and security groups.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
subnets (subnet object map): Map of Subnet objects keyed by zone.
Returns:
json (dict): A dict for yugaware to consume with the necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
host_vpc (Host VPC object): Optional; if provided, only peerings with this
specific host VPC are returned.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
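# Illustrative outputs of get_device_names, derived from the logic above:
#   get_device_names("i3.2xlarge", 2) -> ["nvme0n1", "nvme1n1"]
#   get_device_names("c5.large", 2)   -> ["xvdb", "xvdc"]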
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Let's assume they have provided the security group ids comma-delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
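# Note: create_instance expects an args object (e.g. an argparse.Namespace) carrying
# at least the fields referenced above: region, machine_image, key_pair_name,
# instance_type, security_group_id, cloud_subnet, assign_public_ip, cmk_res_name,
# num_volumes, volume_type, volume_size, disk_iops, search_pattern, type and
# instance_tags.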
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
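# Illustrative usage sketch (the hosted zone id, prefix and IPs are hypothetical):
#
#   create_dns_record_set("Z0123456789ABCDEFGHIJ", "my-universe",
#                         ["10.0.0.1", "10.0.0.2"])
#
# This submits a CREATE change for an A record named
# "my-universe.<hosted zone name>" with TTL 5 pointing at both IPs, then waits for
# the change to propagate.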
|
create_igw
|
Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
internet gateway: newly created internet gateway object.
|
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
# TODO: handle private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
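# Illustrative usage sketch (the ids are hypothetical): add_route_to_rt acts as an
# upsert keyed on the destination CIDR, e.g.
#
#   add_route_to_rt(rt, "0.0.0.0/0", "GatewayId", "igw-0123456789abcdef0")
#   add_route_to_rt(rt, "10.1.0.0/16", "VpcPeeringConnectionId",
#                   "pcx-0123456789abcdef0")
#
# creates the route if the CIDR is absent and replaces its target if it points
# elsewhere.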
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
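# Examples of the conversion performed by dumb_camel_to_snake, which
# add_route_to_rt above uses to map boto3 request parameter names onto the
# corresponding Route attribute names:
#   dumb_camel_to_snake("GatewayId")              -> "gateway_id"
#   dumb_camel_to_snake("VpcPeeringConnectionId") -> "vpc_peering_connection_id"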
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (list): List of rules to add to the security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
# MASKED: create_igw function (lines 486-509)
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
region info (obj): Map containing "per_vpc_info", keyed by VPC id, with subnets by zone and security groups.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
subnets (subnet object map): Map of Subnet objects keyed by zone.
Returns:
json (dict): A dict for yugaware to consume with the necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
host_vpc (Host VPC object): Optional; if provided, only peerings with this
specific host VPC are returned.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Let's assume they have provided the security group ids comma-delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
|
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
internet gateway: newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
# If we have an existing IGW for the region, let's just tag it with yb-XX-igw.
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
# If we don't have an internet gateway, let's create one and attach it to the VPC.
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
| 486 | 509 |
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
        # TODO: handle the private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
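# For example (illustrative):
#   dumb_camel_to_snake("GatewayId")               -> "gateway_id"
#   dumb_camel_to_snake("VpcPeeringConnectionId")  -> "vpc_peering_connection_id"
# These are exactly the snake_case attribute names that add_route_to_rt reads off the
# boto3 Route resource via getattr when deciding whether an existing route needs replacing.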
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
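# Illustrative (hypothetical) usage of the decorator above; any EC2 call that is prone to
# "Request limit exceeded" errors can be wrapped so it gets retried with jitter:
#
#   @aws_request_limit_retry
#   def describe_instances_with_retry(region, **kwargs):
#       return boto3.client("ec2", region_name=region).describe_instances(**kwargs)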
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
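# The mapping returned by get_zones looks like the following (zone names and subnet ids
# are hypothetical); zones without a matching subnet map to None:
#   {"us-west-2a": "subnet-0abc...", "us-west-2b": "subnet-0def...", "us-west-2c": None}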
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
        internet_gateway: The newly created (or existing) internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
        # If we have an existing IGW for the region, let's just tag it with yb-XX-igw.
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
    # If we don't have an internet gateway, let's create one and attach it to the VPC.
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
        sgs (list of SecurityGroup objects): Region specific security group objects
        subnets (subnet object map): Map of Subnet objects keyed by zone.
    Returns:
        json (dict): A dict for yugaware to consume with the necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
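# Shape of the dict returned above (ids are hypothetical):
#   {
#       "vpc_id": "vpc-0123...",
#       "security_group": [{"id": "sg-0456...", "name": "yb-us-west-2-sg"}],
#       "zones": {"us-west-2a": "subnet-0abc...", "us-west-2b": "subnet-0def..."}
#   }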
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
    Args:
        region (str): Region whose yb- tagged VPC and related resources should be deleted.
        host_vpc_id (str): Optional id of the host VPC whose route tables reference this region.
        host_vpc_region (str): Optional region of the host VPC.
    """
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
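# For example (illustrative), get_tag_filter("yb-us-west-2") produces the boto3 filter
#   [{'Name': 'tag:Name', 'Values': ['yb-us-west-2']}]
# which the lookup helpers above pass straight to the various .filter(Filters=...) calls.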
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
        host_vpc (VPC object): Optional; if provided, only peerings with this specific
            VPC are returned.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
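# Illustrative examples of the device naming above:
#   get_device_names("i3.2xlarge", 2) -> ["nvme0n1", "nvme1n1"]   (NVMe instance types)
#   get_device_names("m3.medium", 2)  -> ["xvdb", "xvdc"]         (non-NVMe instance types)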
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
    # Let's assume they may have provided a comma-delimited list of security group ids.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
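# Illustrative usage of the Route53 helpers above (hosted zone id and IPs are hypothetical):
#   create_dns_record_set("Z0123456789EXAMPLE", "my-universe", ["10.0.0.1", "10.0.0.2"])
# creates an A record named "my-universe.<hosted zone name>" with a TTL of 5 seconds that
# resolves to both IPs, then waits for the change to propagate.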
|
create_route_table
|
Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
|
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
        # TODO: handle the private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
        internet_gateway: The newly created (or existing) internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
        # If we have an existing IGW for the region, let's just tag it with yb-XX-igw.
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
    # If we don't have an internet gateway, let's create one and attach it to the VPC.
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
# MASKED: create_route_table function (lines 524-545)
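# A sketch restoring the masked helper, reconstructed from the unmasked copy of this module
# earlier in the document; it mirrors the create_vpc/create_igw get_or_create helpers above.
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
    """Method to create route table based on tag_name in given VPC. It will first
    query for the tag name to see if the route table already exists or if one is already
    attached to the VPC, if so it will return that route table.
    Args:
        client (boto client): Region specific boto client
        tag_name (str): Route table tag name
        vpc (vpc object): VPC object to create the route table against
    Returns:
        RouteTable (obj): newly created RouteTable object.
    """
    # Check to see if there is a route table attached to the VPC; if so, we can just tag it.
    existing_route_table = next(iter(vpc.route_tables.all()), None)
    if existing_route_table is not None:
        tag_resource_name(client, existing_route_table.id, tag_name)
        return existing_route_table
    # If no route table exists, we can create one and tag it.
    route_table = vpc.create_route_table()
    tag_resource_name(client, route_table.id, tag_name)
    return route_table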
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
        sgs (list of SecurityGroup objects): Region specific security group objects
        subnets (subnet object map): Map of Subnet objects keyed by zone.
    Returns:
        json (dict): A dict for yugaware to consume with the necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
    Args:
        region (str): Region whose yb- tagged VPC and related resources should be deleted.
        host_vpc_id (str): Optional id of the host VPC whose route tables reference this region.
        host_vpc_region (str): Optional region of the host VPC.
    """
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
        vpc (VPC object): VPC object to search for peerings.
        host_vpc (Host VPC object): Can be None; if provided, only peerings against
            that specific host_vpc are returned.
    Returns:
        VPC peerings (list): List of matching vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
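# Illustrative sketch (not part of the original module): how the instance-type
# helpers above classify types and derive device names. The asserts mirror the
# logic as written and run without any AWS access.
def _example_device_names():
    assert is_nvme("i3.2xlarge") and not is_next_gen("i3.2xlarge")
    assert is_next_gen("c5.large") and not has_ephemerals("c5.large")
    assert has_ephemerals("m3.medium")
    # NVMe types get nvme0n1, nvme1n1, ...; the rest get xvdb, xvdc, ...
    assert get_device_names("i3.2xlarge", 2) == ["nvme0n1", "nvme1n1"]
    assert get_device_names("c5.large", 2) == ["xvdb", "xvdc"]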
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
    # Assume a provided security group id may be a comma-delimited list of ids.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
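# Illustrative usage sketch (not part of the original module): the attributes
# create_instance reads off its args object (normally an argparse Namespace).
# Every value below is a placeholder; a real AMI, key pair and subnet are needed.
def _example_create_instance():
    from argparse import Namespace
    args = Namespace(
        region="us-west-2", machine_image="ami-0123456789abcdef0",
        key_pair_name="yb-dev-key", instance_type="c5.large",
        security_group_id=None, cloud_subnet="subnet-0123456789abcdef0",
        assign_public_ip=True, cmk_res_name=None,
        num_volumes=2, volume_type="gp2", volume_size=250, disk_iops=None,
        search_pattern="yb-dev-test-n1", type="cluster-server",
        instance_tags='{"yb-owner": "dev"}')
    create_instance(args)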
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
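# Illustrative usage sketch (not part of the original module): setting and
# removing customer tags on an existing instance. Ids and tags are placeholders;
# the internal Name/launched-by/yb-server-type tags cannot be removed this way.
def _example_modify_tags():
    modify_tags("us-west-2", "i-0123456789abcdef0",
                '{"yb-owner": "dev", "cost-center": "42"}', "stale-tag")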
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
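# Illustrative sketch (not part of the original module): the per-region resource
# names produced by the naming helpers above; runs without any AWS access.
def _example_resource_names():
    assert get_yb_sg_name("us-west-2") == "yb-us-west-2-sg"
    assert RESOURCE_PREFIX_FORMAT.format("us-west-2") == "yb-us-west-2"
    assert IGW_PREFIX_FORMAT.format("us-west-2") == "yb-us-west-2-igw"
    assert ROUTE_TABLE_PREFIX_FORMAT.format("us-west-2") == "yb-us-west-2-rt"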
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
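# Illustrative usage sketch (not part of the original module): pointing a
# round-robin A record at a set of node IPs via the Route53 helpers above.
# The hosted zone id and IPs are placeholders; UPSERT creates the record if it
# is missing or replaces its value otherwise.
def _example_upsert_dns():
    hosted_zone_id = "Z0123456789ABCDEFGHIJ"
    node_ips = ["10.9.0.11", "10.9.0.12", "10.9.0.13"]
    edit_dns_record_set(hosted_zone_id, "mycluster", node_ips)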
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
# TODO: handle private/public case at somepoint, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (list): List of rule dicts to add to the security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
internet gateway: Newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
# If we have existing igw for the region, lets just tag it with yb-XX-igw
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
# If we don't have an internet gateway, let's create one and attach it to the vpc
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
# MASKED: cleanup_igw function (lines 557-566)
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
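# Illustrative usage sketch (not part of the original module): because
# create_vpc is wrapped in get_or_create(get_vpc), calling it twice with the
# same tag_name is intended to return the existing yb-<region> VPC rather than
# create a second one. The region and CIDR below are placeholder assumptions.
def _example_idempotent_create_vpc(region="us-west-2", cidr="10.9.0.0/16"):
    client = get_client(region)
    tag = RESOURCE_PREFIX_FORMAT.format(region)
    first = create_vpc(client=client, tag_name=tag, cidr=cidr)
    again = create_vpc(client=client, tag_name=tag, cidr=cidr)
    return first.id == again.id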
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
subnets (subnet object map): Map of Subnet objects keyed by zone.
Returns:
dict: A dict of the vpc, security group and zone->subnet ids for YugaWare to consume.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc
peering is done.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
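# Illustrative sketch (not part of the original module): the helpers above pick
# nvme device names for i3 instance types and xvd<letter> names for everything
# else, so get_device_names produces for example:
def _example_device_names():
    assert get_device_names("i3.2xlarge", 2) == ["nvme0n1", "nvme1n1"]
    assert get_device_names("c5.large", 2) == ["xvdb", "xvdc"]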
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Lets assume they have provided security group id comma delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove the tags we were asked to remove, refusing if any internal tags are included.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
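# Illustrative usage sketch (not part of the original module): modify_tags takes
# the tags to set as a JSON string and the tags to remove as a comma-separated
# string; internal tags may not be removed. All values below are placeholders.
def _example_modify_tags(region="us-west-2", instance_id="i-0123456789abcdef0"):
    modify_tags(region, instance_id, '{"env": "test"}', "old-tag")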
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
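# Illustrative usage sketch (not part of the original module): creates an A
# record "<prefix>.<hosted zone name>" pointing at the given IPs and waits for
# the change to propagate. The zone id, prefix and IPs below are placeholders.
def _example_create_dns_record():
    create_dns_record_set("Z0000000EXAMPLE", "mycluster",
                          ["10.0.0.1", "10.0.0.2"])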
|
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
| 557 | 566 |
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
# TODO: handle the private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
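# Illustrative sketch (not part of the original module): add_cidr_to_rules
# appends an allow-all rule block for the given CIDR, for example:
def _example_cidr_rule():
    rules = []
    add_cidr_to_rules(rules, "10.1.0.0/16")
    assert rules == [{"ip_protocol": "-1", "from_port": 0,
                      "to_port": 65535, "cidr_ip": "10.1.0.0/16"}]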
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
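# Illustrative sketch (not part of the original module): dumb_camel_to_snake
# converts boto3 request keys into the matching resource attribute names, e.g.:
def _example_camel_to_snake():
    assert dumb_camel_to_snake("GatewayId") == "gateway_id"
    assert dumb_camel_to_snake("VpcPeeringConnectionId") == "vpc_peering_connection_id"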
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
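# Illustrative usage sketch (not part of the original module): the decorator is
# meant to wrap an AWS call that may hit request limits; the function below is
# hypothetical and only shows the intended application pattern.
@aws_request_limit_retry
def _example_describe_instance_state(region, instance_id):
    return get_client(region).Instance(instance_id).state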
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (list): List of rule dicts to add to the security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
internet gateway: Newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
# If we have existing igw for the region, lets just tag it with yb-XX-igw
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
# If we don't have an internet gateway, let's create one and attach it to the vpc
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
subnets (subnet object map): Map of Subnet objects keyed by zone.
Returns:
dict: A dict of the vpc, security group and zone->subnet ids for YugaWare to consume.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc
peering is done.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Lets assume they have provided security group id comma delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove the tags we were asked to remove, refusing if any internal tags are included.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
|
create_vpc
|
Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
|
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
# TODO: handle the private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
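# Illustrative example (not part of the original module): add_cidr_to_rules appends an
# allow-all-protocols/ports ingress rule for the given CIDR, e.g.
#   rules = []
#   add_cidr_to_rules(rules, "10.9.0.0/16")
# leaves rules == [{"ip_protocol": "-1", "from_port": 0, "to_port": 65535,
#                   "cidr_ip": "10.9.0.0/16"}].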
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
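# Illustrative sketch, assuming a hypothetical metadata shape (the real values come from the
# cloud metadata file): with
#   metadata = {"region_cidr_format": "10.{}.0.0/16",
#               "regions": {"us-west-2": {"cidr_prefix": "37"}}}
# get_region_cidr(metadata, "us-west-2") would return "10.37.0.0/16" and
# get_region_cidrs(metadata) would return {"us-west-2": "10.37.0.0/16"}.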
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
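# Illustrative examples (derived from the regex above): dumb_camel_to_snake turns boto-style
# keys into resource attribute names, which is how add_route_to_rt compares an existing
# route's target via getattr():
#   dumb_camel_to_snake("GatewayId")              -> "gateway_id"
#   dumb_camel_to_snake("VpcPeeringConnectionId") -> "vpc_peering_connection_id"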
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
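    # Illustrative sketch of the per_region_meta shape this method consumes; the ids below are
    # hypothetical placeholders, only the key names ("vpcId", "customSecurityGroupId",
    # "azToSubnetIds") come from the code above:
    #   {"vpcId": "vpc-0123456789abcdef0",
    #    "customSecurityGroupId": "sg-aaa,sg-bbb",
    #    "azToSubnetIds": {"us-west-2a": "subnet-0123456789abcdef0"}}
    # When "vpcId" is absent, the yb-{region} tagged VPC, SG and route table are looked up instead.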
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
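# Hypothetical usage sketch (this decorator is not yet applied anywhere in this module, per the
# docstring above); any AWS helper could be wrapped so that "Request limit exceeded" errors are
# retried with jitter:
#   @aws_request_limit_retry
#   def get_instance_state(region, instance_id):
#       return get_client(region).Instance(instance_id).state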
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
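# Illustrative return shape (subnet ids are hypothetical placeholders): for a region with two
# AZs where only one has a matching subnet, get_zones("us-west-2") could return
#   {"us-west-2a": "subnet-0123456789abcdef0", "us-west-2b": None}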
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
        internet gateway: newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
    # If we have an existing igw for the region, let's just tag it with yb-XX-igw
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
    # If we don't have an internet gateway, let's create one and attach it to the vpc
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
# MASKED: create_vpc function (lines 589-602)
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
        sgs (list of Security Group objects): Region specific Security Group objects
        subnets (subnet object map): Map of Subnet objects keyed by zone.
    Returns:
json (str): A Json string for yugaware to consume with necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
        host_vpc (Host VPC object): Can be None; when provided, only peerings with that
            specific VPC are returned.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
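# Illustrative examples of get_device_names output (follows from the is_nvme check below):
#   get_device_names("i3.2xlarge", 2) -> ["nvme0n1", "nvme1n1"]
#   get_device_names("c5.large", 2)   -> ["xvdb", "xvdc"]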
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
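# Illustrative classification (follows directly from the prefix checks above):
#   is_nvme("i3.xlarge")        -> True
#   is_next_gen("c4.large")     -> True
#   has_ephemerals("m3.medium") -> True   (neither nvme nor "next gen")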
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
    # Let's assume the security group ids were provided comma delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
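# Note (editorial sketch, not part of the original module): create_instance reads its inputs off
# an argparse-style namespace; based on the attribute accesses above, a minimal args object needs
# at least: region, machine_image, key_pair_name, instance_type, num_volumes, security_group_id,
# cloud_subnet, assign_public_ip, cmk_res_name, volume_type, volume_size, disk_iops,
# search_pattern, type and instance_tags.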
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
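# Hypothetical usage sketch of the Route53 helpers above (the hosted zone id and IPs are
# placeholders):
#   create_dns_record_set("Z0123456789ABCDEFGHIJ", "my-universe", ["10.0.0.1", "10.0.0.2"])
# creates an A record named "my-universe.<hosted zone name>" with a 5 second TTL, using the
# CREATE/UPSERT/DELETE change batch built in _update_dns_record_set below.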
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
|
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
| 589 | 602 |
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
        # TODO: handle private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
        internet gateway: newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
    # If we have an existing igw for the region, let's just tag it with yb-XX-igw
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
    # If we don't have an internet gateway, let's create one and attach it to the vpc
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
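# Note (editorial sketch): the @get_or_create(get_vpc) decorator pattern used throughout this
# module appears to call the getter first (here, get_vpc by tag name) and only fall through to
# the creator when nothing is found, so re-running bootstrap against an already tagged VPC is
# expected to reuse it rather than create a duplicate.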
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
        sgs (list of Security Group objects): Region specific Security Group objects
        subnets (subnet object map): Map of Subnet objects keyed by zone.
    Returns:
json (str): A Json string for yugaware to consume with necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
        host_vpc (Host VPC object): Can be None; when provided, only peerings with that
            specific VPC are returned.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Let's assume any provided security group ids are comma delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
|
set_yb_sg_and_fetch_vpc
|
Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
|
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
# TODO: handle private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
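# Illustrative sketch (not part of the original module): add_cidr_to_rules appends an
# allow-all rule block for the given CIDR, in the same shape as metadata["sg_rules"].
def _example_cidr_rules():
    rules = []
    add_cidr_to_rules(rules, "10.1.0.0/16")
    # rules is now:
    # [{"ip_protocol": "-1", "from_port": 0, "to_port": 65535, "cidr_ip": "10.1.0.0/16"}]
    return rules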
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
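# Illustrative sketch (not part of the original module): the per_region_meta dict consumed by
# YbVpcComponents.from_user_json uses the keys referenced above; all ids below are placeholders.
def _example_from_user_json():
    per_region_meta = {
        "vpcId": "vpc-0123456789abcdef0",
        "customSecurityGroupId": "sg-11111111,sg-22222222",
        "azToSubnetIds": {"us-west-2a": "subnet-aaaa1111", "us-west-2b": "subnet-bbbb2222"}
    }
    return YbVpcComponents.from_user_json("us-west-2", per_region_meta)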
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
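# Illustrative sketch (not part of the original module): applying the retry decorator to an
# arbitrary AWS call, so "Request limit exceeded" failures get retried via
# request_retry_decorator. The wrapped function itself is hypothetical.
@aws_request_limit_retry
def _example_describe_vpcs(region):
    return boto3.client("ec2", region_name=region).describe_vpcs()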
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
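# Illustrative sketch (not part of the original module): fetching the current spot price for a
# placeholder zone and instance type; this issues a real describe_spot_price_history call and
# returns the SpotPrice string.
def _example_spot_price():
    return get_spot_pricing("us-west-2", "us-west-2a", "c5.large")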
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
internet gateway: newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
# If we have an existing igw for the region, let's just tag it with yb-XX-igw
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
# If we don't have an internet gateway, let's create one and attach it to the vpc
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
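# Illustrative sketch (not part of the original module): get_route_by_cidr is what keeps the
# route helpers idempotent; for example, checking whether a route table already has a default
# route before deciding to add one.
def _example_has_default_route(route_table):
    return get_route_by_cidr(route_table, IGW_CIDR) is not None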
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
# MASKED: set_yb_sg_and_fetch_vpc function (lines 605-626)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
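# Illustrative sketch (not part of the original module): _get_name_from_tags pulls the "Name"
# tag value out of the raw tag list boto3 returns, or None when the tag (or tag list) is absent.
def _example_name_from_tags():
    tags = [{"Key": "Name", "Value": "yb-us-west-2a"}, {"Key": "Owner", "Value": "yb"}]
    assert _get_name_from_tags(tags) == "yb-us-west-2a"
    assert _get_name_from_tags(None) is None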
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
subnets (subnet object map): Map of Subnet objects keyed of zone.
Returns:
json (str): A Json string for yugaware to consume with necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
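# Illustrative sketch (not part of the original module): the dict produced by
# vpc_components_as_json has the following shape (all ids are placeholders).
_EXAMPLE_VPC_COMPONENTS_JSON = {
    "vpc_id": "vpc-0123456789abcdef0",
    "security_group": [{"id": "sg-11111111", "name": "yb-us-west-2-sg"}],
    "zones": {"us-west-2a": "subnet-aaaa1111", "us-west-2b": "subnet-bbbb2222"}
}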
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc
peering is done.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
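# Illustrative sketch (not part of the original module): how the instance-type helpers drive
# device naming -- i3 instances get nvme names, "next-gen" families get EBS xvd* names, and
# everything else is treated as having ephemeral drives.
def _example_device_names():
    assert get_device_names("i3.2xlarge", 2) == ["nvme0n1", "nvme1n1"]
    assert get_device_names("c5.large", 2) == ["xvdb", "xvdc"]
    assert has_ephemerals("c5.large") is False
    assert has_ephemerals("m3.medium") is True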
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Let's assume any provided security group ids are comma delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
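# Illustrative sketch (not part of the original module): the Route53 helpers above take a hosted
# zone id, a record name prefix and the node IPs; all values below are placeholders and the call
# performs real Route53 changes.
def _example_dns_round_robin():
    create_dns_record_set("Z0000000EXAMPLE", "yb-universe-n1",
                          ["10.1.0.10", "10.1.16.10", "10.1.32.10"])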
|
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
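# Illustrative sketch (not part of the original implementation): calling set_yb_sg_and_fetch_vpc
# with a provider's cloud metadata; the region and destination VPC id below are placeholders.
def _example_existing_vpc_bootstrap(metadata):
    return set_yb_sg_and_fetch_vpc(metadata, "us-west-2", "vpc-0123456789abcdef0")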
| 605 | 626 |
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
# TODO: handle private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
internet gateway: newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
# If we have an existing igw for the region, let's just tag it with yb-XX-igw
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
# If we don't have an internet gateway, let's create one and attach it to the vpc
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
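# Usage sketch (illustrative, not part of the original module; resource ids are hypothetical):
#   rt = get_client("us-west-2").RouteTable("rtb-0123456789abcdef0")
#   if get_route_by_cidr(rt, "10.1.0.0/16") is None:
#       add_route_to_rt(rt, "10.1.0.0/16", "VpcPeeringConnectionId", "pcx-0123456789abcdef0")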
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
subnets (subnet object map): Map of Subnet objects keyed of zone.
Retuns:
json (str): A Json string for yugaware to consume with necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc
peering is done.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
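# Examples (illustrative): get_device_names("i3.2xlarge", 2) returns ["nvme0n1", "nvme1n1"],
# while get_device_names("c5.large", 2) returns ["xvdb", "xvdc"].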
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Assume any provided security group ids are comma delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
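# Example (illustrative; the hosted zone id and name prefix are hypothetical):
#   _update_dns_record_set("Z0123456789ABC", "universe1", ["10.0.0.1", "10.0.0.2"], "UPSERT")
# upserts the A record "universe1.<hosted-zone-name>" with TTL 5 covering both ips.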
|
vpc_components_as_json
|
Method that takes a VPC, Security Groups and Subnets and returns their ids in a json-friendly dict.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group objects
subnets (subnet object map): Map of Subnet objects keyed by zone.
Returns:
json (dict): A dict with the necessary ids for yugaware to consume.
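Example of the returned structure (illustrative addition, not part of the original docstring;
the ids are hypothetical):
{"vpc_id": "vpc-0aaa...", "security_group": [{"id": "sg-0bbb...", "name": "yb-us-west-2-sg"}],
"zones": {"us-west-2a": "subnet-0ccc...", "us-west-2b": "subnet-0ddd..."}}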
|
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
# TODO: handle the private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
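# Example (illustrative): add_cidr_to_rules(rules, "10.9.0.0/16") appends
#   {"ip_protocol": "-1", "from_port": 0, "to_port": 65535, "cidr_ip": "10.9.0.0/16"},
# i.e. an allow-all entry for that CIDR in the shape consumed by create_security_group.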
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
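# Example (illustrative; ids are hypothetical): for a bootstrapped us-west-2 this returns
#   {"us-west-2a": "subnet-0aaa...", "us-west-2b": "subnet-0bbb...", "us-west-2c": None}
# where a zone maps to None if neither a tagged nor a fallback subnet exists there.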
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
internet gateway: newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
# If we have an existing igw for the region, let's just tag it with yb-XX-igw
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
# If we don't have an internet gateway, let's create one and attach it to the vpc
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
# MASKED: vpc_components_as_json function (lines 688-703)
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc
peering is done.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
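# Examples (illustrative): is_nvme("i3.2xlarge") -> True, is_next_gen("c5.large") -> True,
# has_ephemerals("m3.medium") -> True, has_ephemerals("c4.xlarge") -> False.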
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Assume any provided security group ids are comma delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
|
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
subnets (subnet object map): Map of Subnet objects keyed of zone.
Retuns:
json (str): A Json string for yugaware to consume with necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
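# Usage sketch (illustrative, not part of the original implementation): the usual caller is
# YbVpcComponents.as_json(), e.g.
#   components = YbVpcComponents.from_user_json("us-west-2", {})
#   vpc_json = components.as_json()  # dispatches to vpc_components_as_json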
| 688 | 703 |
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
# TODO: handle the private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
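# Example (illustrative): add_route_to_rt(rt, "0.0.0.0/0", "GatewayId", igw.id) creates a
# default route through the internet gateway, or replaces the target if the existing route
# for that CIDR points elsewhere.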
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
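# Hedged usage sketch (editor's addition): how this decorator is presumably meant to be
# applied, assuming request_retry_decorator() returns a wrapped callable as its name and
# docstring suggest. _example_describe_instance is a hypothetical helper, not part of the
# original module.
@aws_request_limit_retry
def _example_describe_instance(region, instance_id):
    # Retried with jitter if AWS answers with "Request limit exceeded".
    return get_client(region).Instance(instance_id).state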
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
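# Hedged usage sketch (editor's addition): querying the most recent Linux/UNIX (Amazon VPC)
# spot price for one instance type in one zone. The region, zone and instance type are
# placeholder values; the call needs AWS credentials and raises YBOpsRuntimeError when the
# instance type is not offered in the zone.
def _example_spot_price():
    return get_spot_pricing("us-west-2", "us-west-2a", "c5.large")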
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
        internet gateway: newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
        # If we have an existing igw for the region, let's just tag it with yb-XX-igw
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
    # If we don't have an internet gateway, let's create one and attach it to the vpc
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
        subnets (subnet object map): Map of Subnet objects keyed off zone.
    Returns:
json (str): A Json string for yugaware to consume with necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
        host_vpc (Host VPC object): Can be None as well; when provided, only peerings with
            that specific host_vpc are returned.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
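# Hedged illustration (editor's addition): the device naming scheme depends only on the
# instance type prefix, so it can be shown without AWS. i3 instances get NVMe device names,
# everything else gets xvd* names starting at "b" ("/dev/sda1" is reserved for the root
# volume by create_instance below).
def _example_device_names():
    assert get_device_names("i3.2xlarge", 2) == ["nvme0n1", "nvme1n1"]
    assert get_device_names("c5.large", 3) == ["xvdb", "xvdc", "xvdd"]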
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Lets assume they have provided security group id comma delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
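# Hedged usage sketch (editor's addition): the three public DNS helpers above all funnel into
# _update_dns_record_set(), which submits a single A-record change batch and waits for Route53
# to report it as applied. The hosted zone id, record prefix and IPs are placeholders.
def _example_dns_round_trip():
    zone_id = "Z0000000000000000000"
    create_dns_record_set(zone_id, "test-universe", ["10.0.0.1", "10.0.0.2"])
    edit_dns_record_set(zone_id, "test-universe", ["10.0.0.3"])
    delete_dns_record_set(zone_id, "test-universe", ["10.0.0.3"])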
|
get_vpc_peerings
|
Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
    host_vpc (Host VPC object): Can be None as well; when provided, only peerings with
        that specific host_vpc are returned.
Returns:
VPC peering (array): Array list of vpc peerings.
|
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
        # TODO: handle private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
        internet gateway: newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
        # If we have an existing igw for the region, let's just tag it with yb-XX-igw
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
    # If we don't have an internet gateway, let's create one and attach it to the vpc
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
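# Hedged illustration (editor's addition): get_route_by_cidr() simply indexes the table's
# routes by destination CIDR. A lightweight stand-in object is used instead of a real boto3
# RouteTable so the lookup can be exercised without AWS access.
def _example_route_lookup():
    import collections
    FakeRoute = collections.namedtuple("FakeRoute", ["destination_cidr_block"])
    FakeRouteTable = collections.namedtuple("FakeRouteTable", ["routes"])
    rt = FakeRouteTable(routes=[FakeRoute("0.0.0.0/0"), FakeRoute("10.1.0.0/16")])
    assert get_route_by_cidr(rt, "10.1.0.0/16").destination_cidr_block == "10.1.0.0/16"
    assert get_route_by_cidr(rt, "192.168.0.0/16") is None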
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
        subnets (subnet object map): Map of Subnet objects keyed off zone.
    Returns:
json (str): A Json string for yugaware to consume with necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
# MASKED: get_vpc_peerings function (lines 774-795)
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
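# Hedged illustration (editor's addition): create_instance() below branches on these three
# predicates -- i3 means NVMe instance storage, c3/c4/c5/m4/r4 means "next-gen" EBS-only
# volumes, and anything else falls back to classic ephemeral mappings.
def _example_instance_type_classes():
    assert is_nvme("i3.4xlarge") and not has_ephemerals("i3.4xlarge")
    assert is_next_gen("c5.large") and not has_ephemerals("c5.large")
    assert has_ephemerals("m3.medium") and not is_next_gen("m3.medium")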
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Lets assume they have provided security group id comma delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
    # Always add the Name tag.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
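# --- Illustrative usage sketch (added for documentation, not part of the original
# module); the hosted zone id, record prefix and IPs are hypothetical placeholders. ---
def _example_dns_round_trip():
    zone_id = "Z0000000EXAMPLE"
    create_dns_record_set(zone_id, "my-universe", ["10.0.0.1", "10.0.0.2"])
    edit_dns_record_set(zone_id, "my-universe", ["10.0.0.3"])    # UPSERT the A record
    delete_dns_record_set(zone_id, "my-universe", ["10.0.0.3"])  # values must match the current record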
|
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc
peering is done.
Returns:
        VPC peering (list): List of matching active VPC peering connections.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
| 774 | 795 |
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
# TODO: handle private/public case at somepoint, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
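# --- Illustrative sketch (added for documentation, not part of the original module). ---
# dumb_camel_to_snake() maps the boto3 request attribute names used by add_route_to_rt
# (e.g. "GatewayId") onto the snake_case attributes exposed on Route resources.
def _example_camel_to_snake():
    assert dumb_camel_to_snake("GatewayId") == "gateway_id"
    assert dumb_camel_to_snake("VpcPeeringConnectionId") == "vpc_peering_connection_id"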
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
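# --- Illustrative usage sketch (added for documentation, not part of the original
# module); _example_describe_vpcs is a hypothetical helper, shown only to demonstrate
# how the retry decorator is applied to a boto3-backed call. ---
@aws_request_limit_retry
def _example_describe_vpcs(region):
    # The wrapped call is retried with jitter whenever AWS answers with
    # "Request limit exceeded".
    return list(get_client(region).vpcs.all())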
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
result = {}
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
        internet gateway: newly created internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
        # If the region already has an IGW, just tag it as yb-<region>-igw.
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
    # If we don't have an internet gateway, create one and attach it to the VPC.
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
subnets (subnet object map): Map of Subnet objects keyed of zone.
    Returns:
json (str): A Json string for yugaware to consume with necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
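# --- Illustrative sketch (added for documentation, not part of the original module). ---
# Shape of the dict returned by vpc_components_as_json(), with hypothetical ids.
_EXAMPLE_VPC_COMPONENTS_JSON = {
    "vpc_id": "vpc-0123456789abcdef0",
    "security_group": [{"id": "sg-0123456789abcdef0", "name": "yb-us-west-2-sg"}],
    "zones": {"us-west-2a": "subnet-0123456789abcdef0"},
}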
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc
peering is done.
Returns:
        VPC peering (list): List of matching active VPC peering connections.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
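# --- Illustrative usage sketch (added for documentation, not part of the original
# module); the region names and VPC id are hypothetical. Passing host_vpc=None returns
# every active peering of the VPC. ---
def _example_list_peerings():
    client = get_client("us-west-2")
    vpc = get_vpc(client, "yb-us-west-2")
    host_vpc = get_client("us-east-1").Vpc("vpc-0123456789abcdef0")
    return get_vpc_peerings(vpc, host_vpc)  # [] if no active peering with host_vpc exists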
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
        VPC peering (list): Single-element list containing the created VPC peering connection.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
    # Assume the security group ids, if any, were provided comma-delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
    # Always add the Name tag.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
|
__init__
|
Keyword args:
transmitted_bytes_per_sec (float): Total bytes transmitted per second.
received_bytes_per_sec (float): Total bytes received per second.
|
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_3 import models
class ReplicationPerformance(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'transmitted_bytes_per_sec': 'float',
'received_bytes_per_sec': 'float'
}
attribute_map = {
'transmitted_bytes_per_sec': 'transmitted_bytes_per_sec',
'received_bytes_per_sec': 'received_bytes_per_sec'
}
required_args = {
}
# MASKED: __init__ function (lines 45-58)
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicationPerformance`".format(key))
if key == "transmitted_bytes_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `transmitted_bytes_per_sec`, must be a value greater than or equal to `0.0`")
if key == "received_bytes_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `received_bytes_per_sec`, must be a value greater than or equal to `0.0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReplicationPerformance, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReplicationPerformance):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
def __init__(
self,
transmitted_bytes_per_sec=None, # type: float
received_bytes_per_sec=None, # type: float
):
"""
Keyword args:
transmitted_bytes_per_sec (float): Total bytes transmitted per second.
received_bytes_per_sec (float): Total bytes received per second.
"""
if transmitted_bytes_per_sec is not None:
self.transmitted_bytes_per_sec = transmitted_bytes_per_sec
if received_bytes_per_sec is not None:
self.received_bytes_per_sec = received_bytes_per_sec
| 45 | 58 |
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_3 import models
class ReplicationPerformance(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'transmitted_bytes_per_sec': 'float',
'received_bytes_per_sec': 'float'
}
attribute_map = {
'transmitted_bytes_per_sec': 'transmitted_bytes_per_sec',
'received_bytes_per_sec': 'received_bytes_per_sec'
}
required_args = {
}
def __init__(
self,
transmitted_bytes_per_sec=None, # type: float
received_bytes_per_sec=None, # type: float
):
"""
Keyword args:
transmitted_bytes_per_sec (float): Total bytes transmitted per second.
received_bytes_per_sec (float): Total bytes received per second.
"""
if transmitted_bytes_per_sec is not None:
self.transmitted_bytes_per_sec = transmitted_bytes_per_sec
if received_bytes_per_sec is not None:
self.received_bytes_per_sec = received_bytes_per_sec
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicationPerformance`".format(key))
if key == "transmitted_bytes_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `transmitted_bytes_per_sec`, must be a value greater than or equal to `0.0`")
if key == "received_bytes_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `received_bytes_per_sec`, must be a value greater than or equal to `0.0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReplicationPerformance, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReplicationPerformance):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
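# --- Illustrative usage sketch (added for documentation, not part of the generated
# client); the throughput numbers are hypothetical. ---
def _example_replication_performance():
    perf = ReplicationPerformance(transmitted_bytes_per_sec=1024.0,
                                  received_bytes_per_sec=2048.0)
    # -> {'transmitted_bytes_per_sec': 1024.0, 'received_bytes_per_sec': 2048.0}
    return perf.to_dict()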
|
debug_identity
|
Debug Identity Op.
Provides an identity mapping of the non-Ref type input tensor for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
    file:///foo/tfdbg_dump, grpc://localhost:11011
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor that equals the input tensor.
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('copy')
def copy(input, tensor_name="", debug_ops_spec=[], name=None):
r"""Copy Op.
Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
device on which the tensor is allocated.
  N.B.: If all the downstream attached debug ops are disabled given the current
gRPC gating status, the output will simply forward the input tensor without
deep-copying. See the documentation of Debug* ops for more details.
Unlike the CopyHost Op, this op does not have HostMemory constraint on its
input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
debug_ops_spec: An optional list of `strings`. Defaults to `[]`.
A list of debug op spec (op, url, gated_grpc) for attached debug
ops. Each element of the list has the format
<debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented
as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
"DebugIdentity;file:///tmp/tfdbg_1;0".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_, _, _op = _op_def_lib._apply_op_helper(
"Copy", input=input, tensor_name=tensor_name,
debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "tensor_name",
_op.get_attr("tensor_name"), "debug_ops_spec",
_op.get_attr("debug_ops_spec"))
_execute.record_gradient(
"Copy", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "Copy", name,
_ctx._post_execution_callbacks, input, "tensor_name", tensor_name,
"debug_ops_spec", debug_ops_spec)
return _result
except _core._FallbackException:
return copy_eager_fallback(
input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec,
name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def copy_eager_fallback(input, tensor_name="", debug_ops_spec=[], name=None):
r"""This is the slowpath function for Eager mode.
This is for function copy
"""
_ctx = _context.context()
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "tensor_name", tensor_name, "debug_ops_spec",
debug_ops_spec)
_result = _execute.execute(b"Copy", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Copy", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
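# --- Illustrative sketch (added for documentation, not part of the generated module). ---
# debug_ops_spec entries follow the "<debug_op>;<url>;<gated_grpc>" format described in
# the docstrings above, with gated_grpc encoded as 0/1; the endpoints are hypothetical.
_EXAMPLE_DEBUG_OPS_SPEC = [
    "DebugIdentity;grpc://localhost:11011;1",
    "DebugNanCount;file:///tmp/tfdbg_1;0",
]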
@tf_export('copy_host')
def copy_host(input, tensor_name="", debug_ops_spec=[], name=None):
r"""Copy Host Op.
Performs CPU-to-CPU deep-copying of tensor.
  N.B.: If all the downstream attached debug ops are disabled given the current
gRPC gating status, the output will simply forward the input tensor without
deep-copying. See the documentation of Debug* ops for more details.
Unlike the Copy Op, this op has HostMemory constraint on its input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
debug_ops_spec: An optional list of `strings`. Defaults to `[]`.
A list of debug op spec (op, url, gated_grpc) for attached debug
ops. Each element of the list has the format
<debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented
as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
"DebugIdentity;file:///tmp/tfdbg_1;0".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy_host' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_, _, _op = _op_def_lib._apply_op_helper(
"CopyHost", input=input, tensor_name=tensor_name,
debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "tensor_name",
_op.get_attr("tensor_name"), "debug_ops_spec",
_op.get_attr("debug_ops_spec"))
_execute.record_gradient(
"CopyHost", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "CopyHost", name,
_ctx._post_execution_callbacks, input, "tensor_name", tensor_name,
"debug_ops_spec", debug_ops_spec)
return _result
except _core._FallbackException:
return copy_host_eager_fallback(
input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec,
name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def copy_host_eager_fallback(input, tensor_name="", debug_ops_spec=[], name=None):
r"""This is the slowpath function for Eager mode.
This is for function copy_host
"""
_ctx = _context.context()
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy_host' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "tensor_name", tensor_name, "debug_ops_spec",
debug_ops_spec)
_result = _execute.execute(b"CopyHost", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"CopyHost", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
# MASKED: debug_identity function (lines 224-302)
def debug_identity_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_identity
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_identity' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugIdentity", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugIdentity", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debug_nan_count')
def debug_nan_count(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""Debug NaN Value Counter Op
Counts number of NaNs in the input tensor, for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
      file:///foo/tfdbg_dump, grpc://localhost:11011.
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
An integer output tensor that is the number of NaNs in the input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_nan_count' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugNanCount", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugNanCount", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugNanCount", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc",
gated_grpc)
return _result
except _core._FallbackException:
return debug_nan_count_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def debug_nan_count_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_nan_count
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_nan_count' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugNanCount", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugNanCount", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
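# --- Editorial usage sketch (not machine generated) -------------------------
# A minimal, hedged example of calling the debug_nan_count wrapper above. The
# tensor values and tensor_name are placeholders chosen for illustration, and
# wrapping the call in a helper keeps the example from executing at import time.
def _example_debug_nan_count():
  import numpy as np  # local import so the generated import block stays untouched
  values = np.array([1.0, float("nan"), 3.0], dtype=np.float32)
  # Returns an int64 tensor; once evaluated it equals 1 for this input.
  return debug_nan_count(values, tensor_name="example:0")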
@tf_export('debug_numeric_summary')
def debug_numeric_summary(input, device_name="", tensor_name="", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
r"""Debug Numeric Summary Op.
Provide a basic summary of numeric value types, range and distribution.
Args:
input: A `Tensor`. Input tensor, non-Reference type, float or double.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
      file:///foo/tfdbg_dump, grpc://localhost:11011
lower_bound: An optional `float`. Defaults to `float('-inf')`.
(float) The lower bound <= which values will be included in the
generalized -inf count. Default: -inf.
upper_bound: An optional `float`. Defaults to `float('inf')`.
(float) The upper bound >= which values will be included in the
generalized +inf count. Default: +inf.
mute_if_healthy: An optional `bool`. Defaults to `False`.
(bool) Do not send data to the debug URLs unless at least one
of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and
inf counts) is non-zero.
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float64`.
    A double tensor of shape [14 + nDimensions], where nDimensions is the
    number of dimensions of the tensor's shape. The elements of output are:
[0]: is initialized (1.0) or not (0.0).
[1]: total number of elements
[2]: NaN element count
[3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by
default.
[4]: negative element count (excluding -inf), if lower_bound is the default
-inf. Otherwise, this is the count of elements > lower_bound and < 0.
[5]: zero element count
    [6]: positive element count (excluding +inf), if upper_bound is the default
      +inf. Otherwise, this is the count of elements < upper_bound and > 0.
[7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by
default.
Output elements [1:8] are all zero, if the tensor is uninitialized.
[8]: minimum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: +inf.
[9]: maximum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: -inf.
[10]: mean of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
[11]: variance of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
[12]: Data type of the tensor encoded as an enum integer. See the DataType
proto for more details.
[13]: Number of dimensions of the tensor (ndims).
[14+]: Sizes of the dimensions.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_numeric_summary' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if lower_bound is None:
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, "lower_bound")
if upper_bound is None:
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, "upper_bound")
if mute_if_healthy is None:
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy")
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugNumericSummary", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls,
lower_bound=lower_bound, upper_bound=upper_bound,
mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "lower_bound",
_op.get_attr("lower_bound"), "upper_bound",
_op.get_attr("upper_bound"), "mute_if_healthy",
_op.get_attr("mute_if_healthy"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugNumericSummary", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugNumericSummary", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "lower_bound",
lower_bound, "upper_bound", upper_bound, "mute_if_healthy",
mute_if_healthy, "gated_grpc", gated_grpc)
return _result
except _core._FallbackException:
return debug_numeric_summary_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, lower_bound=lower_bound,
upper_bound=upper_bound, mute_if_healthy=mute_if_healthy,
gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def debug_numeric_summary_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_numeric_summary
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_numeric_summary' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if lower_bound is None:
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, "lower_bound")
if upper_bound is None:
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, "upper_bound")
if mute_if_healthy is None:
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy")
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "lower_bound", lower_bound,
"upper_bound", upper_bound, "mute_if_healthy", mute_if_healthy,
"gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugNumericSummary", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugNumericSummary", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
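# --- Editorial helper sketch (not machine generated) ------------------------
# Labels the fixed positions of the summary vector documented in
# debug_numeric_summary above. The field names are editorial shorthand, not
# official TensorFlow identifiers; pass the evaluated (NumPy) value of the
# op's output, and the trailing entries become the dimension sizes.
_NUMERIC_SUMMARY_FIELDS = (
    "is_initialized", "element_count", "nan_count", "neg_inf_count",
    "negative_count", "zero_count", "positive_count", "pos_inf_count",
    "min", "max", "mean", "variance", "dtype_enum", "ndims")
def _numeric_summary_to_dict(summary_values):
  named = dict(zip(_NUMERIC_SUMMARY_FIELDS, summary_values[:14]))
  named["dim_sizes"] = list(summary_values[14:])
  return named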
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
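# --- Editorial helper sketch (not machine generated) ------------------------
# Companion to _InitOpDefLibrary: the serialized bytes passed to it are an
# OpList proto, so the op names it registers can be recovered as shown below.
# For this module that yields Copy, CopyHost, DebugIdentity, DebugNanCount and
# DebugNumericSummary.
def _op_names_from_proto_bytes(op_list_proto_bytes):
  op_list = _op_def_pb2.OpList()
  op_list.ParseFromString(op_list_proto_bytes)
  return [op.name for op in op_list.op]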
# op {
# name: "Copy"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_ops_spec"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "CopyHost"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_ops_spec"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugIdentity"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugNanCount"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_INT64
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugNumericSummary"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_DOUBLE
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "lower_bound"
# type: "float"
# default_value {
# f: -inf
# }
# }
# attr {
# name: "upper_bound"
# type: "float"
# default_value {
# f: inf
# }
# }
# attr {
# name: "mute_if_healthy"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
_op_def_lib = _InitOpDefLibrary(b"\nl\n\004Copy\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013tensor_name\022\006string\032\002\022\000\"\"\n\016debug_ops_spec\022\014list(string)\032\002\n\000\230\001\001\np\n\010CopyHost\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013tensor_name\022\006string\032\002\022\000\"\"\n\016debug_ops_spec\022\014list(string)\032\002\n\000\230\001\001\n\244\001\n\rDebugIdentity\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001\n\243\001\n\rDebugNanCount\022\n\n\005input\"\001T\032\n\n\006output\030\t\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001\n\200\002\n\023DebugNumericSummary\022\n\n\005input\"\001T\032\n\n\006output\030\002\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\033\n\013lower_bound\022\005float\032\005%\000\000\200\377\"\033\n\013upper_bound\022\005float\032\005%\000\000\200\177\"\033\n\017mute_if_healthy\022\004bool\032\002(\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001")
|
@tf_export('debug_identity')
def debug_identity(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""Debug Identity Op.
Provides an identity mapping of the non-Ref type input tensor for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
      file:///foo/tfdbg_dump, grpc://localhost:11011
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor that equals the input tensor.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_identity' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugIdentity", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugIdentity", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugIdentity", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc",
gated_grpc)
return _result
except _core._FallbackException:
return debug_identity_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
| 224 | 302 |
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('copy')
def copy(input, tensor_name="", debug_ops_spec=[], name=None):
r"""Copy Op.
Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
device on which the tensor is allocated.
  N.B.: If all the downstream attached debug ops are disabled given the current
gRPC gating status, the output will simply forward the input tensor without
deep-copying. See the documentation of Debug* ops for more details.
Unlike the CopyHost Op, this op does not have HostMemory constraint on its
input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
debug_ops_spec: An optional list of `strings`. Defaults to `[]`.
A list of debug op spec (op, url, gated_grpc) for attached debug
ops. Each element of the list has the format
      <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is a boolean represented
as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
"DebugIdentity;file:///tmp/tfdbg_1;0".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_, _, _op = _op_def_lib._apply_op_helper(
"Copy", input=input, tensor_name=tensor_name,
debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "tensor_name",
_op.get_attr("tensor_name"), "debug_ops_spec",
_op.get_attr("debug_ops_spec"))
_execute.record_gradient(
"Copy", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "Copy", name,
_ctx._post_execution_callbacks, input, "tensor_name", tensor_name,
"debug_ops_spec", debug_ops_spec)
return _result
except _core._FallbackException:
return copy_eager_fallback(
input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec,
name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def copy_eager_fallback(input, tensor_name="", debug_ops_spec=[], name=None):
r"""This is the slowpath function for Eager mode.
This is for function copy
"""
_ctx = _context.context()
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "tensor_name", tensor_name, "debug_ops_spec",
debug_ops_spec)
_result = _execute.execute(b"Copy", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Copy", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
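# --- Editorial sketch (not machine generated) -------------------------------
# Shows the debug_ops_spec string format shared by copy and copy_host:
# "<debug_op>;<url>;<gated_grpc as 0/1>". The helper name and its defaults are
# assumptions made for this example only.
def _make_debug_ops_spec(debug_op, url, gated_grpc=False):
  return "%s;%s;%d" % (debug_op, url, 1 if gated_grpc else 0)
# e.g. _make_debug_ops_spec("DebugIdentity", "file:///tmp/tfdbg_1")
#   -> "DebugIdentity;file:///tmp/tfdbg_1;0"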
@tf_export('copy_host')
def copy_host(input, tensor_name="", debug_ops_spec=[], name=None):
r"""Copy Host Op.
Performs CPU-to-CPU deep-copying of tensor.
  N.B.: If all the downstream attached debug ops are disabled given the current
gRPC gating status, the output will simply forward the input tensor without
deep-copying. See the documentation of Debug* ops for more details.
Unlike the Copy Op, this op has HostMemory constraint on its input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
debug_ops_spec: An optional list of `strings`. Defaults to `[]`.
A list of debug op spec (op, url, gated_grpc) for attached debug
ops. Each element of the list has the format
      <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is a boolean represented
as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
"DebugIdentity;file:///tmp/tfdbg_1;0".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy_host' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_, _, _op = _op_def_lib._apply_op_helper(
"CopyHost", input=input, tensor_name=tensor_name,
debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "tensor_name",
_op.get_attr("tensor_name"), "debug_ops_spec",
_op.get_attr("debug_ops_spec"))
_execute.record_gradient(
"CopyHost", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "CopyHost", name,
_ctx._post_execution_callbacks, input, "tensor_name", tensor_name,
"debug_ops_spec", debug_ops_spec)
return _result
except _core._FallbackException:
return copy_host_eager_fallback(
input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec,
name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def copy_host_eager_fallback(input, tensor_name="", debug_ops_spec=[], name=None):
r"""This is the slowpath function for Eager mode.
This is for function copy_host
"""
_ctx = _context.context()
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy_host' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "tensor_name", tensor_name, "debug_ops_spec",
debug_ops_spec)
_result = _execute.execute(b"CopyHost", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"CopyHost", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debug_identity')
def debug_identity(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""Debug Identity Op.
Provides an identity mapping of the non-Ref type input tensor for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
      file:///foo/tfdbg_dump, grpc://localhost:11011
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor that equals the input tensor.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_identity' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugIdentity", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugIdentity", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugIdentity", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc",
gated_grpc)
return _result
except _core._FallbackException:
return debug_identity_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def debug_identity_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_identity
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_identity' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugIdentity", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugIdentity", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
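# --- Editorial usage sketch (not machine generated) -------------------------
# A hedged example of debug_identity: the op simply forwards its input, while
# the debug_urls below (file:// scheme, placeholder path) control where a tfdbg
# dump would be written when the op actually runs.
def _example_debug_identity(x, dump_dir="/tmp/tfdbg_example"):
  return debug_identity(x, tensor_name="example:0",
                        debug_urls=["file://" + dump_dir])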
@tf_export('debug_nan_count')
def debug_nan_count(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""Debug NaN Value Counter Op
Counts number of NaNs in the input tensor, for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
      file:///foo/tfdbg_dump, grpc://localhost:11011.
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
An integer output tensor that is the number of NaNs in the input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_nan_count' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugNanCount", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugNanCount", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugNanCount", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc",
gated_grpc)
return _result
except _core._FallbackException:
return debug_nan_count_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def debug_nan_count_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_nan_count
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_nan_count' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugNanCount", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugNanCount", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debug_numeric_summary')
def debug_numeric_summary(input, device_name="", tensor_name="", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
r"""Debug Numeric Summary Op.
Provide a basic summary of numeric value types, range and distribution.
Args:
input: A `Tensor`. Input tensor, non-Reference type, float or double.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
      file:///foo/tfdbg_dump, grpc://localhost:11011
lower_bound: An optional `float`. Defaults to `float('-inf')`.
(float) The lower bound <= which values will be included in the
generalized -inf count. Default: -inf.
upper_bound: An optional `float`. Defaults to `float('inf')`.
(float) The upper bound >= which values will be included in the
generalized +inf count. Default: +inf.
mute_if_healthy: An optional `bool`. Defaults to `False`.
(bool) Do not send data to the debug URLs unless at least one
of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and
inf counts) is non-zero.
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float64`.
    A double tensor of shape [14 + nDimensions], where nDimensions is the
    number of dimensions of the tensor's shape. The elements of output are:
[0]: is initialized (1.0) or not (0.0).
[1]: total number of elements
[2]: NaN element count
[3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by
default.
[4]: negative element count (excluding -inf), if lower_bound is the default
-inf. Otherwise, this is the count of elements > lower_bound and < 0.
[5]: zero element count
    [6]: positive element count (excluding +inf), if upper_bound is the default
      +inf. Otherwise, this is the count of elements < upper_bound and > 0.
[7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by
default.
Output elements [1:8] are all zero, if the tensor is uninitialized.
[8]: minimum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: +inf.
[9]: maximum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: -inf.
[10]: mean of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
[11]: variance of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
[12]: Data type of the tensor encoded as an enum integer. See the DataType
proto for more details.
[13]: Number of dimensions of the tensor (ndims).
[14+]: Sizes of the dimensions.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_numeric_summary' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if lower_bound is None:
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, "lower_bound")
if upper_bound is None:
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, "upper_bound")
if mute_if_healthy is None:
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy")
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugNumericSummary", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls,
lower_bound=lower_bound, upper_bound=upper_bound,
mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "lower_bound",
_op.get_attr("lower_bound"), "upper_bound",
_op.get_attr("upper_bound"), "mute_if_healthy",
_op.get_attr("mute_if_healthy"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugNumericSummary", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugNumericSummary", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "lower_bound",
lower_bound, "upper_bound", upper_bound, "mute_if_healthy",
mute_if_healthy, "gated_grpc", gated_grpc)
return _result
except _core._FallbackException:
return debug_numeric_summary_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, lower_bound=lower_bound,
upper_bound=upper_bound, mute_if_healthy=mute_if_healthy,
gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def debug_numeric_summary_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_numeric_summary
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_numeric_summary' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if lower_bound is None:
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, "lower_bound")
if upper_bound is None:
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, "upper_bound")
if mute_if_healthy is None:
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy")
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "lower_bound", lower_bound,
"upper_bound", upper_bound, "mute_if_healthy", mute_if_healthy,
"gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugNumericSummary", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugNumericSummary", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "Copy"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_ops_spec"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "CopyHost"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_ops_spec"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugIdentity"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugNanCount"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_INT64
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugNumericSummary"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_DOUBLE
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "lower_bound"
# type: "float"
# default_value {
# f: -inf
# }
# }
# attr {
# name: "upper_bound"
# type: "float"
# default_value {
# f: inf
# }
# }
# attr {
# name: "mute_if_healthy"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
_op_def_lib = _InitOpDefLibrary(b"\nl\n\004Copy\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013tensor_name\022\006string\032\002\022\000\"\"\n\016debug_ops_spec\022\014list(string)\032\002\n\000\230\001\001\np\n\010CopyHost\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013tensor_name\022\006string\032\002\022\000\"\"\n\016debug_ops_spec\022\014list(string)\032\002\n\000\230\001\001\n\244\001\n\rDebugIdentity\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001\n\243\001\n\rDebugNanCount\022\n\n\005input\"\001T\032\n\n\006output\030\t\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001\n\200\002\n\023DebugNumericSummary\022\n\n\005input\"\001T\032\n\n\006output\030\002\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\033\n\013lower_bound\022\005float\032\005%\000\000\200\377\"\033\n\013upper_bound\022\005float\032\005%\000\000\200\177\"\033\n\017mute_if_healthy\022\004bool\032\002(\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001")
|
debug_nan_count
|
Debug NaN Value Counter Op.
Counts the number of NaNs in the input tensor, for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
    file:///foo/tfdbg_dump, grpc://localhost:11011.
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
An integer output tensor that is the number of NaNs in the input.
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('copy')
def copy(input, tensor_name="", debug_ops_spec=[], name=None):
r"""Copy Op.
Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
device on which the tensor is allocated.
  N.B.: If all the downstream attached debug ops are disabled given the current
gRPC gating status, the output will simply forward the input tensor without
deep-copying. See the documentation of Debug* ops for more details.
Unlike the CopyHost Op, this op does not have HostMemory constraint on its
input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
debug_ops_spec: An optional list of `strings`. Defaults to `[]`.
A list of debug op spec (op, url, gated_grpc) for attached debug
ops. Each element of the list has the format
      <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is a boolean represented
as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
"DebugIdentity;file:///tmp/tfdbg_1;0".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_, _, _op = _op_def_lib._apply_op_helper(
"Copy", input=input, tensor_name=tensor_name,
debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "tensor_name",
_op.get_attr("tensor_name"), "debug_ops_spec",
_op.get_attr("debug_ops_spec"))
_execute.record_gradient(
"Copy", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "Copy", name,
_ctx._post_execution_callbacks, input, "tensor_name", tensor_name,
"debug_ops_spec", debug_ops_spec)
return _result
except _core._FallbackException:
return copy_eager_fallback(
input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec,
name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def copy_eager_fallback(input, tensor_name="", debug_ops_spec=[], name=None):
r"""This is the slowpath function for Eager mode.
This is for function copy
"""
_ctx = _context.context()
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "tensor_name", tensor_name, "debug_ops_spec",
debug_ops_spec)
_result = _execute.execute(b"Copy", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Copy", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('copy_host')
def copy_host(input, tensor_name="", debug_ops_spec=[], name=None):
r"""Copy Host Op.
Performs CPU-to-CPU deep-copying of tensor.
  N.B.: If all the downstream attached debug ops are disabled given the current
gRPC gating status, the output will simply forward the input tensor without
deep-copying. See the documentation of Debug* ops for more details.
Unlike the Copy Op, this op has HostMemory constraint on its input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
debug_ops_spec: An optional list of `strings`. Defaults to `[]`.
A list of debug op spec (op, url, gated_grpc) for attached debug
ops. Each element of the list has the format
      <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is a boolean represented
as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
"DebugIdentity;file:///tmp/tfdbg_1;0".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy_host' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_, _, _op = _op_def_lib._apply_op_helper(
"CopyHost", input=input, tensor_name=tensor_name,
debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "tensor_name",
_op.get_attr("tensor_name"), "debug_ops_spec",
_op.get_attr("debug_ops_spec"))
_execute.record_gradient(
"CopyHost", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "CopyHost", name,
_ctx._post_execution_callbacks, input, "tensor_name", tensor_name,
"debug_ops_spec", debug_ops_spec)
return _result
except _core._FallbackException:
return copy_host_eager_fallback(
input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec,
name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def copy_host_eager_fallback(input, tensor_name="", debug_ops_spec=[], name=None):
r"""This is the slowpath function for Eager mode.
This is for function copy_host
"""
_ctx = _context.context()
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy_host' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "tensor_name", tensor_name, "debug_ops_spec",
debug_ops_spec)
_result = _execute.execute(b"CopyHost", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"CopyHost", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debug_identity')
def debug_identity(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""Debug Identity Op.
Provides an identity mapping of the non-Ref type input tensor for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
      file:///foo/tfdbg_dump, grpc://localhost:11011
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor that equals the input tensor.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_identity' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugIdentity", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugIdentity", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugIdentity", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc",
gated_grpc)
return _result
except _core._FallbackException:
return debug_identity_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def debug_identity_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_identity
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_identity' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugIdentity", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugIdentity", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
# MASKED: debug_nan_count function (lines 338-416)
def debug_nan_count_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_nan_count
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_nan_count' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugNanCount", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugNanCount", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debug_numeric_summary')
def debug_numeric_summary(input, device_name="", tensor_name="", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
r"""Debug Numeric Summary Op.
  Provides a basic summary of numeric value types, range, and distribution.
Args:
input: A `Tensor`. Input tensor, non-Reference type, float or double.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
        file:///foo/tfdbg_dump, grpc://localhost:11011
lower_bound: An optional `float`. Defaults to `float('-inf')`.
(float) The lower bound <= which values will be included in the
generalized -inf count. Default: -inf.
upper_bound: An optional `float`. Defaults to `float('inf')`.
(float) The upper bound >= which values will be included in the
generalized +inf count. Default: +inf.
mute_if_healthy: An optional `bool`. Defaults to `False`.
(bool) Do not send data to the debug URLs unless at least one
of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and
inf counts) is non-zero.
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float64`.
    A double tensor of shape [14 + nDimensions], where nDimensions is the
    number of dimensions of the tensor's shape. The elements of output are:
[0]: is initialized (1.0) or not (0.0).
[1]: total number of elements
[2]: NaN element count
[3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by
default.
[4]: negative element count (excluding -inf), if lower_bound is the default
-inf. Otherwise, this is the count of elements > lower_bound and < 0.
[5]: zero element count
    [6]: positive element count (excluding +inf), if upper_bound is the default
      +inf. Otherwise, this is the count of elements < upper_bound and > 0.
[7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by
default.
Output elements [1:8] are all zero, if the tensor is uninitialized.
[8]: minimum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: +inf.
[9]: maximum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: -inf.
[10]: mean of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
[11]: variance of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
[12]: Data type of the tensor encoded as an enum integer. See the DataType
proto for more details.
[13]: Number of dimensions of the tensor (ndims).
[14+]: Sizes of the dimensions.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_numeric_summary' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if lower_bound is None:
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, "lower_bound")
if upper_bound is None:
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, "upper_bound")
if mute_if_healthy is None:
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy")
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugNumericSummary", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls,
lower_bound=lower_bound, upper_bound=upper_bound,
mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "lower_bound",
_op.get_attr("lower_bound"), "upper_bound",
_op.get_attr("upper_bound"), "mute_if_healthy",
_op.get_attr("mute_if_healthy"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugNumericSummary", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugNumericSummary", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "lower_bound",
lower_bound, "upper_bound", upper_bound, "mute_if_healthy",
mute_if_healthy, "gated_grpc", gated_grpc)
return _result
except _core._FallbackException:
return debug_numeric_summary_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, lower_bound=lower_bound,
upper_bound=upper_bound, mute_if_healthy=mute_if_healthy,
gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def debug_numeric_summary_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_numeric_summary
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_numeric_summary' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if lower_bound is None:
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, "lower_bound")
if upper_bound is None:
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, "upper_bound")
if mute_if_healthy is None:
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy")
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "lower_bound", lower_bound,
"upper_bound", upper_bound, "mute_if_healthy", mute_if_healthy,
"gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugNumericSummary", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugNumericSummary", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
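# Illustrative sketch (not part of the generated module): one way to unpack the
# flat DebugNumericSummary output documented above into named fields. `summary`
# is assumed to be any float sequence laid out as described in the docstring of
# debug_numeric_summary; the helper name below is hypothetical.
def _unpack_numeric_summary(summary):
  """Return the named fields of a DebugNumericSummary output vector."""
  fields = {
      "is_initialized": summary[0],
      "element_count": summary[1],
      "nan_count": summary[2],
      "neg_inf_count": summary[3],   # generalized -inf count (elements <= lower_bound)
      "negative_count": summary[4],
      "zero_count": summary[5],
      "positive_count": summary[6],
      "pos_inf_count": summary[7],   # generalized +inf count (elements >= upper_bound)
      "min": summary[8],
      "max": summary[9],
      "mean": summary[10],
      "variance": summary[11],
      "dtype_enum": summary[12],
      "ndims": int(summary[13]),
  }
  fields["dims"] = list(summary[14:14 + fields["ndims"]])
  return fields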
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "Copy"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_ops_spec"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "CopyHost"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_ops_spec"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugIdentity"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugNanCount"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_INT64
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugNumericSummary"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_DOUBLE
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "lower_bound"
# type: "float"
# default_value {
# f: -inf
# }
# }
# attr {
# name: "upper_bound"
# type: "float"
# default_value {
# f: inf
# }
# }
# attr {
# name: "mute_if_healthy"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
_op_def_lib = _InitOpDefLibrary(b"\nl\n\004Copy\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013tensor_name\022\006string\032\002\022\000\"\"\n\016debug_ops_spec\022\014list(string)\032\002\n\000\230\001\001\np\n\010CopyHost\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013tensor_name\022\006string\032\002\022\000\"\"\n\016debug_ops_spec\022\014list(string)\032\002\n\000\230\001\001\n\244\001\n\rDebugIdentity\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001\n\243\001\n\rDebugNanCount\022\n\n\005input\"\001T\032\n\n\006output\030\t\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001\n\200\002\n\023DebugNumericSummary\022\n\n\005input\"\001T\032\n\n\006output\030\002\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\033\n\013lower_bound\022\005float\032\005%\000\000\200\377\"\033\n\013upper_bound\022\005float\032\005%\000\000\200\177\"\033\n\017mute_if_healthy\022\004bool\032\002(\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001")
|
@tf_export('debug_nan_count')
def debug_nan_count(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""Debug NaN Value Counter Op
Counts number of NaNs in the input tensor, for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
        file:///foo/tfdbg_dump, grpc://localhost:11011.
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
An integer output tensor that is the number of NaNs in the input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_nan_count' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugNanCount", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugNanCount", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugNanCount", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc",
gated_grpc)
return _result
except _core._FallbackException:
return debug_nan_count_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
| 338 | 416 |
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('copy')
def copy(input, tensor_name="", debug_ops_spec=[], name=None):
r"""Copy Op.
Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
device on which the tensor is allocated.
  N.B.: If all the downstream attached debug ops are disabled given the current
gRPC gating status, the output will simply forward the input tensor without
deep-copying. See the documentation of Debug* ops for more details.
Unlike the CopyHost Op, this op does not have HostMemory constraint on its
input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
debug_ops_spec: An optional list of `strings`. Defaults to `[]`.
A list of debug op spec (op, url, gated_grpc) for attached debug
ops. Each element of the list has the format
      <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is a boolean represented
as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
"DebugIdentity;file:///tmp/tfdbg_1;0".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_, _, _op = _op_def_lib._apply_op_helper(
"Copy", input=input, tensor_name=tensor_name,
debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "tensor_name",
_op.get_attr("tensor_name"), "debug_ops_spec",
_op.get_attr("debug_ops_spec"))
_execute.record_gradient(
"Copy", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "Copy", name,
_ctx._post_execution_callbacks, input, "tensor_name", tensor_name,
"debug_ops_spec", debug_ops_spec)
return _result
except _core._FallbackException:
return copy_eager_fallback(
input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec,
name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def copy_eager_fallback(input, tensor_name="", debug_ops_spec=[], name=None):
r"""This is the slowpath function for Eager mode.
This is for function copy
"""
_ctx = _context.context()
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "tensor_name", tensor_name, "debug_ops_spec",
debug_ops_spec)
_result = _execute.execute(b"Copy", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Copy", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
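# Illustrative sketch (not part of the generated module): splitting one element of
# `debug_ops_spec` in the "<debug_op>;<grpc_url>;<gated_grpc>" format described in
# the docstring of `copy` above. The helper name below is hypothetical.
def _parse_debug_op_spec(spec):
  """Split e.g. "DebugIdentity;grpc://foo:3333;1" into (op, url, gated_grpc)."""
  debug_op, grpc_url, gated_grpc = spec.split(";")
  return debug_op, grpc_url, gated_grpc == "1"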
@tf_export('copy_host')
def copy_host(input, tensor_name="", debug_ops_spec=[], name=None):
r"""Copy Host Op.
Performs CPU-to-CPU deep-copying of tensor.
  N.B.: If all the downstream attached debug ops are disabled given the current
gRPC gating status, the output will simply forward the input tensor without
deep-copying. See the documentation of Debug* ops for more details.
Unlike the Copy Op, this op has HostMemory constraint on its input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
debug_ops_spec: An optional list of `strings`. Defaults to `[]`.
A list of debug op spec (op, url, gated_grpc) for attached debug
ops. Each element of the list has the format
      <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is a boolean represented
as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
"DebugIdentity;file:///tmp/tfdbg_1;0".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy_host' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_, _, _op = _op_def_lib._apply_op_helper(
"CopyHost", input=input, tensor_name=tensor_name,
debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "tensor_name",
_op.get_attr("tensor_name"), "debug_ops_spec",
_op.get_attr("debug_ops_spec"))
_execute.record_gradient(
"CopyHost", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "CopyHost", name,
_ctx._post_execution_callbacks, input, "tensor_name", tensor_name,
"debug_ops_spec", debug_ops_spec)
return _result
except _core._FallbackException:
return copy_host_eager_fallback(
input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec,
name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def copy_host_eager_fallback(input, tensor_name="", debug_ops_spec=[], name=None):
r"""This is the slowpath function for Eager mode.
This is for function copy_host
"""
_ctx = _context.context()
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy_host' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "tensor_name", tensor_name, "debug_ops_spec",
debug_ops_spec)
_result = _execute.execute(b"CopyHost", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"CopyHost", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debug_identity')
def debug_identity(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""Debug Identity Op.
Provides an identity mapping of the non-Ref type input tensor for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
        file:///foo/tfdbg_dump, grpc://localhost:11011
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor that equals the input tensor.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_identity' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugIdentity", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugIdentity", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugIdentity", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc",
gated_grpc)
return _result
except _core._FallbackException:
return debug_identity_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def debug_identity_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_identity
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_identity' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugIdentity", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugIdentity", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debug_nan_count')
def debug_nan_count(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""Debug NaN Value Counter Op
Counts number of NaNs in the input tensor, for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
        file:///foo/tfdbg_dump, grpc://localhost:11011.
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
An integer output tensor that is the number of NaNs in the input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_nan_count' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugNanCount", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugNanCount", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugNanCount", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc",
gated_grpc)
return _result
except _core._FallbackException:
return debug_nan_count_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def debug_nan_count_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_nan_count
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_nan_count' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugNanCount", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugNanCount", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debug_numeric_summary')
def debug_numeric_summary(input, device_name="", tensor_name="", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
r"""Debug Numeric Summary Op.
  Provides a basic summary of numeric value types, range, and distribution.
Args:
input: A `Tensor`. Input tensor, non-Reference type, float or double.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
        file:///foo/tfdbg_dump, grpc://localhost:11011
lower_bound: An optional `float`. Defaults to `float('-inf')`.
(float) The lower bound <= which values will be included in the
generalized -inf count. Default: -inf.
upper_bound: An optional `float`. Defaults to `float('inf')`.
(float) The upper bound >= which values will be included in the
generalized +inf count. Default: +inf.
mute_if_healthy: An optional `bool`. Defaults to `False`.
(bool) Do not send data to the debug URLs unless at least one
of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and
inf counts) is non-zero.
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float64`.
    A double tensor of shape [14 + nDimensions], where nDimensions is the
    number of dimensions of the tensor's shape. The elements of output are:
[0]: is initialized (1.0) or not (0.0).
[1]: total number of elements
[2]: NaN element count
[3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by
default.
[4]: negative element count (excluding -inf), if lower_bound is the default
-inf. Otherwise, this is the count of elements > lower_bound and < 0.
[5]: zero element count
    [6]: positive element count (excluding +inf), if upper_bound is the default
      +inf. Otherwise, this is the count of elements < upper_bound and > 0.
[7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by
default.
Output elements [1:8] are all zero, if the tensor is uninitialized.
[8]: minimum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: +inf.
[9]: maximum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: -inf.
[10]: mean of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
[11]: variance of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
[12]: Data type of the tensor encoded as an enum integer. See the DataType
proto for more details.
[13]: Number of dimensions of the tensor (ndims).
[14+]: Sizes of the dimensions.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_numeric_summary' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if lower_bound is None:
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, "lower_bound")
if upper_bound is None:
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, "upper_bound")
if mute_if_healthy is None:
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy")
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugNumericSummary", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls,
lower_bound=lower_bound, upper_bound=upper_bound,
mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "lower_bound",
_op.get_attr("lower_bound"), "upper_bound",
_op.get_attr("upper_bound"), "mute_if_healthy",
_op.get_attr("mute_if_healthy"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugNumericSummary", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugNumericSummary", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "lower_bound",
lower_bound, "upper_bound", upper_bound, "mute_if_healthy",
mute_if_healthy, "gated_grpc", gated_grpc)
return _result
except _core._FallbackException:
return debug_numeric_summary_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, lower_bound=lower_bound,
upper_bound=upper_bound, mute_if_healthy=mute_if_healthy,
gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def debug_numeric_summary_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_numeric_summary
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_numeric_summary' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if lower_bound is None:
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, "lower_bound")
if upper_bound is None:
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, "upper_bound")
if mute_if_healthy is None:
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy")
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "lower_bound", lower_bound,
"upper_bound", upper_bound, "mute_if_healthy", mute_if_healthy,
"gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugNumericSummary", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugNumericSummary", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "Copy"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_ops_spec"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "CopyHost"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_ops_spec"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugIdentity"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugNanCount"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_INT64
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugNumericSummary"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_DOUBLE
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "lower_bound"
# type: "float"
# default_value {
# f: -inf
# }
# }
# attr {
# name: "upper_bound"
# type: "float"
# default_value {
# f: inf
# }
# }
# attr {
# name: "mute_if_healthy"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
_op_def_lib = _InitOpDefLibrary(b"\nl\n\004Copy\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013tensor_name\022\006string\032\002\022\000\"\"\n\016debug_ops_spec\022\014list(string)\032\002\n\000\230\001\001\np\n\010CopyHost\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013tensor_name\022\006string\032\002\022\000\"\"\n\016debug_ops_spec\022\014list(string)\032\002\n\000\230\001\001\n\244\001\n\rDebugIdentity\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001\n\243\001\n\rDebugNanCount\022\n\n\005input\"\001T\032\n\n\006output\030\t\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001\n\200\002\n\023DebugNumericSummary\022\n\n\005input\"\001T\032\n\n\006output\030\002\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\033\n\013lower_bound\022\005float\032\005%\000\000\200\377\"\033\n\013upper_bound\022\005float\032\005%\000\000\200\177\"\033\n\017mute_if_healthy\022\004bool\032\002(\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001")
|
test_module
|
Tests a given module's stub against introspecting it at runtime.
Requires the stub to have been built already, accomplished by a call to ``build_stubs``.
:param module_name: The module to test
|
"""Tests for stubs.
Verify that various things in stubs are consistent with how things behave at runtime.
"""
import argparse
import copy
import enum
import importlib
import inspect
import re
import sys
import types
import warnings
from functools import singledispatch
from pathlib import Path
from typing import Any, Dict, Generic, Iterator, List, Optional, Tuple, TypeVar, Union, cast
from typing_extensions import Type
import mypy.build
import mypy.modulefinder
import mypy.types
from mypy import nodes
from mypy.config_parser import parse_config_file
from mypy.options import Options
from mypy.util import FancyFormatter
class Missing:
"""Marker object for things that are missing (from a stub or the runtime)."""
def __repr__(self) -> str:
return "MISSING"
MISSING = Missing()
T = TypeVar("T")
if sys.version_info >= (3, 5, 3):
MaybeMissing = Union[T, Missing]
else:
# work around a bug in 3.5.2 and earlier's typing.py
class MaybeMissingMeta(type):
def __getitem__(self, arg: Any) -> Any:
return Union[arg, Missing]
class MaybeMissing(metaclass=MaybeMissingMeta): # type: ignore
pass
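# Illustrative example (not part of the original module): on either branch above,
# a MaybeMissing[T] annotation accepts both a real value and the MISSING marker.
# The helper below is hypothetical and only demonstrates the intended usage.
def _get_or_missing(mapping: Dict[str, int], key: str) -> "MaybeMissing[int]":
    return mapping.get(key, MISSING)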
_formatter = FancyFormatter(sys.stdout, sys.stderr, False)
def _style(message: str, **kwargs: Any) -> str:
"""Wrapper around mypy.util for fancy formatting."""
kwargs.setdefault("color", "none")
return _formatter.style(message, **kwargs)
class Error:
def __init__(
self,
object_path: List[str],
message: str,
stub_object: MaybeMissing[nodes.Node],
runtime_object: MaybeMissing[Any],
*,
stub_desc: Optional[str] = None,
runtime_desc: Optional[str] = None
) -> None:
"""Represents an error found by stubtest.
:param object_path: Location of the object with the error,
e.g. ``["module", "Class", "method"]``
:param message: Error message
:param stub_object: The mypy node representing the stub
:param runtime_object: Actual object obtained from the runtime
:param stub_desc: Specialised description for the stub object, should you wish
:param runtime_desc: Specialised description for the runtime object, should you wish
"""
self.object_desc = ".".join(object_path)
self.message = message
self.stub_object = stub_object
self.runtime_object = runtime_object
self.stub_desc = stub_desc or str(getattr(stub_object, "type", stub_object))
self.runtime_desc = runtime_desc or str(runtime_object)
def is_missing_stub(self) -> bool:
"""Whether or not the error is for something missing from the stub."""
return isinstance(self.stub_object, Missing)
def is_positional_only_related(self) -> bool:
"""Whether or not the error is for something being (or not being) positional-only."""
# TODO: This is hacky, use error codes or something more resilient
return "leading double underscore" in self.message
def get_description(self, concise: bool = False) -> str:
"""Returns a description of the error.
:param concise: Whether to return a concise, one-line description
"""
if concise:
return _style(self.object_desc, bold=True) + " " + self.message
stub_line = None
stub_file = None # type: None
if not isinstance(self.stub_object, Missing):
stub_line = self.stub_object.line
# TODO: Find a way of getting the stub file
stub_loc_str = ""
if stub_line:
stub_loc_str += " at line {}".format(stub_line)
if stub_file:
stub_loc_str += " in file {}".format(Path(stub_file))
runtime_line = None
runtime_file = None
if not isinstance(self.runtime_object, Missing):
try:
runtime_line = inspect.getsourcelines(self.runtime_object)[1]
except (OSError, TypeError):
pass
try:
runtime_file = inspect.getsourcefile(self.runtime_object)
except TypeError:
pass
runtime_loc_str = ""
if runtime_line:
runtime_loc_str += " at line {}".format(runtime_line)
if runtime_file:
runtime_loc_str += " in file {}".format(Path(runtime_file))
output = [
_style("error: ", color="red", bold=True),
_style(self.object_desc, bold=True),
" ",
self.message,
"\n",
"Stub:",
_style(stub_loc_str, dim=True),
"\n",
_style(self.stub_desc + "\n", color="blue", dim=True),
"Runtime:",
_style(runtime_loc_str, dim=True),
"\n",
_style(self.runtime_desc + "\n", color="blue", dim=True),
]
return "".join(output)
# MASKED: test_module function (lines 157-180)
@singledispatch
def verify(
stub: nodes.Node, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
"""Entry point for comparing a stub to a runtime object.
We use single dispatch based on the type of ``stub``.
:param stub: The mypy node representing a part of the stub
:param runtime: The runtime object corresponding to ``stub``
"""
yield Error(object_path, "is an unknown mypy node", stub, runtime)
@verify.register(nodes.MypyFile)
def verify_mypyfile(
stub: nodes.MypyFile, runtime: MaybeMissing[types.ModuleType], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if not isinstance(runtime, types.ModuleType):
yield Error(object_path, "is not a module", stub, runtime)
return
# Check things in the stub that are public
to_check = set(
m
for m, o in stub.names.items()
if o.module_public and (not m.startswith("_") or hasattr(runtime, m))
)
runtime_public_contents = [
m
for m in dir(runtime)
if not m.startswith("_")
# Ensure that the object's module is `runtime`, e.g. so that we don't pick up reexported
# modules and infinitely recurse. Unfortunately, there's no way to detect an explicit
# reexport missing from the stubs (that isn't specified in __all__)
and getattr(getattr(runtime, m), "__module__", None) == runtime.__name__
]
# Check all things declared in module's __all__, falling back to runtime_public_contents
to_check.update(getattr(runtime, "__all__", runtime_public_contents))
to_check.difference_update({"__file__", "__doc__", "__name__", "__builtins__", "__package__"})
for entry in sorted(to_check):
yield from verify(
stub.names[entry].node if entry in stub.names else MISSING,
getattr(runtime, entry, MISSING),
object_path + [entry],
)
@verify.register(nodes.TypeInfo)
def verify_typeinfo(
stub: nodes.TypeInfo, runtime: MaybeMissing[Type[Any]], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime, stub_desc=repr(stub))
return
if not isinstance(runtime, type):
yield Error(object_path, "is not a type", stub, runtime, stub_desc=repr(stub))
return
# Check everything already defined in the stub
to_check = set(stub.names)
# There's a reasonable case to be made that we should always check all dunders, but it's
# currently quite noisy. We could turn this into a denylist instead of an allowlist.
to_check.update(
# cast to workaround mypyc complaints
m for m in cast(Any, vars)(runtime) if not m.startswith("_") or m in SPECIAL_DUNDERS
)
for entry in sorted(to_check):
mangled_entry = entry
if entry.startswith("__") and not entry.endswith("__"):
mangled_entry = "_{}{}".format(stub.name, entry)
yield from verify(
next((t.names[entry].node for t in stub.mro if entry in t.names), MISSING),
getattr(runtime, mangled_entry, MISSING),
object_path + [entry],
)
def _verify_static_class_methods(
stub: nodes.FuncBase, runtime: Any, object_path: List[str]
) -> Iterator[str]:
if stub.name in ("__new__", "__init_subclass__", "__class_getitem__"):
# Special cased by Python, so don't bother checking
return
if inspect.isbuiltin(runtime):
# The isinstance checks don't work reliably for builtins, e.g. datetime.datetime.now, so do
# something a little hacky that seems to work well
probably_class_method = isinstance(getattr(runtime, "__self__", None), type)
if probably_class_method and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not probably_class_method and stub.is_class:
yield "stub is a classmethod but runtime is not"
return
# Look the object up statically, to avoid binding by the descriptor protocol
static_runtime = importlib.import_module(object_path[0])
for entry in object_path[1:]:
try:
static_runtime = inspect.getattr_static(static_runtime, entry)
except AttributeError:
# This can happen with mangled names, ignore for now.
# TODO: pass more information about ancestors of nodes/objects to verify, so we don't
# have to do this hacky lookup. Would be useful in a couple other places too.
return
if isinstance(static_runtime, classmethod) and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not isinstance(static_runtime, classmethod) and stub.is_class:
yield "stub is a classmethod but runtime is not"
if isinstance(static_runtime, staticmethod) and not stub.is_static:
yield "runtime is a staticmethod but stub is not"
if not isinstance(static_runtime, staticmethod) and stub.is_static:
yield "stub is a staticmethod but runtime is not"
def _verify_arg_name(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter, function_name: str
) -> Iterator[str]:
"""Checks whether argument names match."""
# Ignore exact names for most dunder methods
if is_dunder(function_name, exclude_special=True):
return
def strip_prefix(s: str, prefix: str) -> str:
return s[len(prefix):] if s.startswith(prefix) else s
if strip_prefix(stub_arg.variable.name, "__") == runtime_arg.name:
return
def names_approx_match(a: str, b: str) -> bool:
a = a.strip("_")
b = b.strip("_")
return a.startswith(b) or b.startswith(a) or len(a) == 1 or len(b) == 1
# Be more permissive about names matching for positional-only arguments
if runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY and names_approx_match(
stub_arg.variable.name, runtime_arg.name
):
return
# This comes up with namedtuples, so ignore
if stub_arg.variable.name == "_self":
return
yield (
'stub argument "{}" differs from runtime argument "{}"'.format(
stub_arg.variable.name, runtime_arg.name
)
)
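# For example (hypothetical names): a stub parameter "__x" is positional-only by
# convention, so it matches a runtime parameter named "x" because the leading "__"
# is stripped before comparison; for runtime positional-only parameters the match
# is looser still, and exact names are not checked for most dunder methods.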
def _verify_arg_default_value(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter
) -> Iterator[str]:
"""Checks whether argument default values are compatible."""
if runtime_arg.default != inspect.Parameter.empty:
if stub_arg.kind not in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'runtime argument "{}" has a default value but stub argument does not'.format(
runtime_arg.name
)
)
else:
runtime_type = get_mypy_type_of_runtime_value(runtime_arg.default)
# Fallback to the type annotation type if var type is missing. The type annotation
# is an UnboundType, but I don't know enough to know what the pros and cons here are.
# UnboundTypes have ugly question marks following them, so default to var type.
# Note we do this same fallback when constructing signatures in from_overloadedfuncdef
stub_type = stub_arg.variable.type or stub_arg.type_annotation
if isinstance(stub_type, mypy.types.TypeVarType):
stub_type = stub_type.upper_bound
if (
runtime_type is not None
and stub_type is not None
# Avoid false positives for marker objects
and type(runtime_arg.default) != object
and not is_subtype_helper(runtime_type, stub_type)
):
yield (
'runtime argument "{}" has a default value of type {}, '
"which is incompatible with stub argument type {}".format(
runtime_arg.name, runtime_type, stub_type
)
)
else:
if stub_arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'stub argument "{}" has a default value but runtime argument does not'.format(
stub_arg.variable.name
)
)
def maybe_strip_cls(name: str, args: List[nodes.Argument]) -> List[nodes.Argument]:
if name in ("__init_subclass__", "__class_getitem__"):
# These are implicitly classmethods. If the stub chooses not to have @classmethod, we
# should remove the cls argument
if args[0].variable.name == "cls":
return args[1:]
return args
class Signature(Generic[T]):
def __init__(self) -> None:
self.pos = [] # type: List[T]
self.kwonly = {} # type: Dict[str, T]
self.varpos = None # type: Optional[T]
self.varkw = None # type: Optional[T]
def __str__(self) -> str:
def get_name(arg: Any) -> str:
if isinstance(arg, inspect.Parameter):
return arg.name
if isinstance(arg, nodes.Argument):
return arg.variable.name
raise AssertionError
def get_type(arg: Any) -> Optional[str]:
if isinstance(arg, inspect.Parameter):
return None
if isinstance(arg, nodes.Argument):
return str(arg.variable.type or arg.type_annotation)
raise AssertionError
def has_default(arg: Any) -> bool:
if isinstance(arg, inspect.Parameter):
return arg.default != inspect.Parameter.empty
if isinstance(arg, nodes.Argument):
return arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT)
raise AssertionError
def get_desc(arg: Any) -> str:
arg_type = get_type(arg)
return (
get_name(arg)
+ (": {}".format(arg_type) if arg_type else "")
+ (" = ..." if has_default(arg) else "")
)
kw_only = sorted(self.kwonly.values(), key=lambda a: (has_default(a), get_name(a)))
ret = "def ("
ret += ", ".join(
[get_desc(arg) for arg in self.pos]
+ (["*" + get_name(self.varpos)] if self.varpos else (["*"] if self.kwonly else []))
+ [get_desc(arg) for arg in kw_only]
+ (["**" + get_name(self.varkw)] if self.varkw else [])
)
ret += ")"
return ret
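    # Example rendering produced by __str__ (added; illustrative, exact type strings depend on
    # the stub):  def (x: builtins.int = ..., *args, key: builtins.str, **kwargs)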
@staticmethod
def from_funcitem(stub: nodes.FuncItem) -> "Signature[nodes.Argument]":
stub_sig = Signature() # type: Signature[nodes.Argument]
stub_args = maybe_strip_cls(stub.name, stub.arguments)
for stub_arg in stub_args:
if stub_arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
stub_sig.pos.append(stub_arg)
elif stub_arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
stub_sig.kwonly[stub_arg.variable.name] = stub_arg
elif stub_arg.kind == nodes.ARG_STAR:
stub_sig.varpos = stub_arg
elif stub_arg.kind == nodes.ARG_STAR2:
stub_sig.varkw = stub_arg
else:
raise AssertionError
return stub_sig
@staticmethod
def from_inspect_signature(signature: inspect.Signature) -> "Signature[inspect.Parameter]":
runtime_sig = Signature() # type: Signature[inspect.Parameter]
for runtime_arg in signature.parameters.values():
if runtime_arg.kind in (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
):
runtime_sig.pos.append(runtime_arg)
elif runtime_arg.kind == inspect.Parameter.KEYWORD_ONLY:
runtime_sig.kwonly[runtime_arg.name] = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_POSITIONAL:
runtime_sig.varpos = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_KEYWORD:
runtime_sig.varkw = runtime_arg
else:
raise AssertionError
return runtime_sig
@staticmethod
def from_overloadedfuncdef(stub: nodes.OverloadedFuncDef) -> "Signature[nodes.Argument]":
"""Returns a Signature from an OverloadedFuncDef.
If life were simple, to verify_overloadedfuncdef, we'd just verify_funcitem for each of its
items. Unfortunately, life isn't simple and overloads are pretty deceitful. So instead, we
try and combine the overload's items into a single signature that is compatible with any
lies it might try to tell.
"""
# For most dunder methods, just assume all args are positional-only
assume_positional_only = is_dunder(stub.name, exclude_special=True)
all_args = {} # type: Dict[str, List[Tuple[nodes.Argument, int]]]
for func in map(_resolve_funcitem_from_decorator, stub.items):
assert func is not None
args = maybe_strip_cls(stub.name, func.arguments)
for index, arg in enumerate(args):
# For positional-only args, we allow overloads to have different names for the same
# argument. To accomplish this, we just make up a fake index-based name.
name = (
"__{}".format(index)
if arg.variable.name.startswith("__") or assume_positional_only
else arg.variable.name
)
all_args.setdefault(name, []).append((arg, index))
def get_position(arg_name: str) -> int:
# We just need this to return the positional args in the correct order.
return max(index for _, index in all_args[arg_name])
def get_type(arg_name: str) -> mypy.types.ProperType:
with mypy.state.strict_optional_set(True):
all_types = [
arg.variable.type or arg.type_annotation for arg, _ in all_args[arg_name]
]
return mypy.typeops.make_simplified_union([t for t in all_types if t])
def get_kind(arg_name: str) -> int:
kinds = {arg.kind for arg, _ in all_args[arg_name]}
if nodes.ARG_STAR in kinds:
return nodes.ARG_STAR
if nodes.ARG_STAR2 in kinds:
return nodes.ARG_STAR2
# The logic here is based on two tenets:
# 1) If an arg is ever optional (or unspecified), it is optional
# 2) If an arg is ever positional, it is positional
is_opt = (
len(all_args[arg_name]) < len(stub.items)
or nodes.ARG_OPT in kinds
or nodes.ARG_NAMED_OPT in kinds
)
is_pos = nodes.ARG_OPT in kinds or nodes.ARG_POS in kinds
if is_opt:
return nodes.ARG_OPT if is_pos else nodes.ARG_NAMED_OPT
return nodes.ARG_POS if is_pos else nodes.ARG_NAMED
sig = Signature() # type: Signature[nodes.Argument]
for arg_name in sorted(all_args, key=get_position):
# example_arg_name gives us a real name (in case we had a fake index-based name)
example_arg_name = all_args[arg_name][0][0].variable.name
arg = nodes.Argument(
nodes.Var(example_arg_name, get_type(arg_name)),
type_annotation=None,
initializer=None,
kind=get_kind(arg_name),
)
if arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
sig.pos.append(arg)
elif arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
sig.kwonly[arg.variable.name] = arg
elif arg.kind == nodes.ARG_STAR:
sig.varpos = arg
elif arg.kind == nodes.ARG_STAR2:
sig.varkw = arg
else:
raise AssertionError
return sig
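# Illustration of the merge above (added; not from the original source). Given the overloads
#   def f(x: int) -> int: ...
#   def f(x: str, y: str = ...) -> str: ...
# the combined signature is roughly "def (x: Union[builtins.int, builtins.str], y: builtins.str = ...)":
# ``x`` stays positional, and ``y`` becomes optional because it is absent from one overload.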
def _verify_signature(
stub: Signature[nodes.Argument], runtime: Signature[inspect.Parameter], function_name: str
) -> Iterator[str]:
# Check positional arguments match up
for stub_arg, runtime_arg in zip(stub.pos, runtime.pos):
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
if (
runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY
and not stub_arg.variable.name.startswith("__")
and not stub_arg.variable.name.strip("_") == "self"
and not is_dunder(function_name, exclude_special=True) # noisy for dunder methods
):
yield (
'stub argument "{}" should be positional-only '
'(rename with a leading double underscore, i.e. "__{}")'.format(
stub_arg.variable.name, runtime_arg.name
)
)
if (
runtime_arg.kind != inspect.Parameter.POSITIONAL_ONLY
and stub_arg.variable.name.startswith("__")
):
yield (
'stub argument "{}" should be positional or keyword '
"(remove leading double underscore)".format(stub_arg.variable.name)
)
# Check unmatched positional args
if len(stub.pos) > len(runtime.pos):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *args. Hence, a) we can't check that the runtime actually takes those
# parameters and b) below, we don't enforce that the stub takes *args, since runtime logic
# may prevent those arguments from actually being accepted.
if runtime.varpos is None:
for stub_arg in stub.pos[len(runtime.pos):]:
# If the variable is in runtime.kwonly, it's just mislabelled as not a
# keyword-only argument
if stub_arg.variable.name not in runtime.kwonly:
yield 'runtime does not have argument "{}"'.format(stub_arg.variable.name)
else:
yield 'stub argument "{}" is not keyword-only'.format(stub_arg.variable.name)
if stub.varpos is not None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
elif len(stub.pos) < len(runtime.pos):
for runtime_arg in runtime.pos[len(stub.pos):]:
if runtime_arg.name not in stub.kwonly:
yield 'stub does not have argument "{}"'.format(runtime_arg.name)
else:
yield 'runtime argument "{}" is not keyword-only'.format(runtime_arg.name)
# Checks involving *args
if len(stub.pos) <= len(runtime.pos) or runtime.varpos is None:
if stub.varpos is None and runtime.varpos is not None:
yield 'stub does not have *args argument "{}"'.format(runtime.varpos.name)
if stub.varpos is not None and runtime.varpos is None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
# Check keyword-only args
for arg in sorted(set(stub.kwonly) & set(runtime.kwonly)):
stub_arg, runtime_arg = stub.kwonly[arg], runtime.kwonly[arg]
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
# Check unmatched keyword-only args
if runtime.varkw is None or not set(runtime.kwonly).issubset(set(stub.kwonly)):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *kwargs. Hence, a) we only check if the runtime actually takes those
# parameters when the above condition holds and b) below, we don't enforce that the stub
# takes *kwargs, since runtime logic may prevent additional arguments from actually being
# accepted.
for arg in sorted(set(stub.kwonly) - set(runtime.kwonly)):
yield 'runtime does not have argument "{}"'.format(arg)
for arg in sorted(set(runtime.kwonly) - set(stub.kwonly)):
if arg in set(stub_arg.variable.name for stub_arg in stub.pos):
# Don't report this if we've reported it before
if len(stub.pos) > len(runtime.pos) and runtime.varpos is not None:
yield 'stub argument "{}" is not keyword-only'.format(arg)
else:
yield 'stub does not have argument "{}"'.format(arg)
# Checks involving **kwargs
if stub.varkw is None and runtime.varkw is not None:
# As mentioned above, don't enforce that the stub takes **kwargs.
# Also check against positional parameters, to avoid a nitpicky message when an argument
# isn't marked as keyword-only
stub_pos_names = set(stub_arg.variable.name for stub_arg in stub.pos)
# Ideally we'd do a strict subset check, but in practice the errors from that aren't useful
if not set(runtime.kwonly).issubset(set(stub.kwonly) | stub_pos_names):
yield 'stub does not have **kwargs argument "{}"'.format(runtime.varkw.name)
if stub.varkw is not None and runtime.varkw is None:
yield 'runtime does not have **kwargs argument "{}"'.format(stub.varkw.variable.name)
@verify.register(nodes.FuncItem)
def verify_funcitem(
stub: nodes.FuncItem, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except (ValueError, RuntimeError):
# inspect.signature throws sometimes
# catch RuntimeError because of https://bugs.python.org/issue39504
return
stub_sig = Signature.from_funcitem(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
runtime_desc="def " + str(signature),
)
@verify.register(Missing)
def verify_none(
stub: Missing, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
yield Error(object_path, "is not present in stub", stub, runtime)
@verify.register(nodes.Var)
def verify_var(
stub: nodes.Var, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
# Don't always yield an error here, because we often can't find instance variables
if len(object_path) <= 2:
yield Error(object_path, "is not present at runtime", stub, runtime)
return
runtime_type = get_mypy_type_of_runtime_value(runtime)
if (
runtime_type is not None
and stub.type is not None
and not is_subtype_helper(runtime_type, stub.type)
):
should_error = True
# Avoid errors when defining enums, since runtime_type is the enum itself, but we'd
# annotate it with the type of runtime.value
if isinstance(runtime, enum.Enum):
runtime_type = get_mypy_type_of_runtime_value(runtime.value)
if runtime_type is not None and is_subtype_helper(runtime_type, stub.type):
should_error = False
if should_error:
yield Error(
object_path,
"variable differs from runtime type {}".format(runtime_type),
stub,
runtime,
)
@verify.register(nodes.OverloadedFuncDef)
def verify_overloadedfuncdef(
stub: nodes.OverloadedFuncDef, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.is_property:
# We get here in cases of overloads from property.setter
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except ValueError:
return
stub_sig = Signature.from_overloadedfuncdef(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
# TODO: This is a little hacky, but the addition here is super useful
if "has a default value of type" in message:
message += (
". This is often caused by overloads failing to account for explicitly passing "
"in the default value."
)
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
stub_desc=str(stub.type) + "\nInferred signature: {}".format(stub_sig),
runtime_desc="def " + str(signature),
)
@verify.register(nodes.TypeVarExpr)
def verify_typevarexpr(
stub: nodes.TypeVarExpr, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
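    # Note (added for clarity): TypeVarExprs are not checked; the unreachable ``yield`` below
    # only exists so this function is a generator and matches the Iterator[Error] interface.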
if False:
yield None
def _verify_property(stub: nodes.Decorator, runtime: Any) -> Iterator[str]:
assert stub.func.is_property
if isinstance(runtime, property):
return
if inspect.isdatadescriptor(runtime):
# It's enough like a property...
return
# Sometimes attributes pretend to be properties, for instance, to express that they
# are read only. So allowlist if runtime_type matches the return type of stub.
runtime_type = get_mypy_type_of_runtime_value(runtime)
func_type = (
stub.func.type.ret_type if isinstance(stub.func.type, mypy.types.CallableType) else None
)
if (
runtime_type is not None
and func_type is not None
and is_subtype_helper(runtime_type, func_type)
):
return
yield "is inconsistent, cannot reconcile @property on stub with runtime object"
def _resolve_funcitem_from_decorator(dec: nodes.OverloadPart) -> Optional[nodes.FuncItem]:
"""Returns a FuncItem that corresponds to the output of the decorator.
Returns None if we can't figure out what that would be. For convenience, this function also
accepts FuncItems.
"""
if isinstance(dec, nodes.FuncItem):
return dec
if dec.func.is_property:
return None
def apply_decorator_to_funcitem(
decorator: nodes.Expression, func: nodes.FuncItem
) -> Optional[nodes.FuncItem]:
if not isinstance(decorator, nodes.RefExpr):
return None
if decorator.fullname is None:
# Happens with namedtuple
return None
if decorator.fullname in (
"builtins.staticmethod",
"typing.overload",
"abc.abstractmethod",
):
return func
if decorator.fullname == "builtins.classmethod":
assert func.arguments[0].variable.name in ("cls", "metacls")
ret = copy.copy(func)
# Remove the cls argument, since it's not present in inspect.signature of classmethods
ret.arguments = ret.arguments[1:]
return ret
# Just give up on any other decorators. After excluding properties, we don't run into
# anything else when running on typeshed's stdlib.
return None
func = dec.func # type: nodes.FuncItem
for decorator in dec.original_decorators:
resulting_func = apply_decorator_to_funcitem(decorator, func)
if resulting_func is None:
return None
func = resulting_func
return func
@verify.register(nodes.Decorator)
def verify_decorator(
stub: nodes.Decorator, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.func.is_property:
for message in _verify_property(stub, runtime):
yield Error(object_path, message, stub, runtime)
return
func = _resolve_funcitem_from_decorator(stub)
if func is not None:
yield from verify(func, runtime, object_path)
@verify.register(nodes.TypeAlias)
def verify_typealias(
stub: nodes.TypeAlias, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if False:
yield None
SPECIAL_DUNDERS = ("__init__", "__new__", "__call__", "__init_subclass__", "__class_getitem__")
def is_dunder(name: str, exclude_special: bool = False) -> bool:
"""Returns whether name is a dunder name.
:param exclude_special: Whether to return False for a couple special dunder methods.
"""
if exclude_special and name in SPECIAL_DUNDERS:
return False
return name.startswith("__") and name.endswith("__")
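# Illustrative behaviour (added): is_dunder("__add__") is True, is_dunder("_private") is False,
# and is_dunder("__init__", exclude_special=True) is False because __init__ is in SPECIAL_DUNDERS.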
def is_subtype_helper(left: mypy.types.Type, right: mypy.types.Type) -> bool:
"""Checks whether ``left`` is a subtype of ``right``."""
left = mypy.types.get_proper_type(left)
right = mypy.types.get_proper_type(right)
if (
isinstance(left, mypy.types.LiteralType)
and isinstance(left.value, int)
and left.value in (0, 1)
and isinstance(right, mypy.types.Instance)
and right.type.fullname == "builtins.bool"
):
# Pretend Literal[0, 1] is a subtype of bool to avoid unhelpful errors.
return True
with mypy.state.strict_optional_set(True):
return mypy.subtypes.is_subtype(left, right)
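# Note (added): besides plain subtyping, the special case above treats Literal[0] and Literal[1]
# as compatible with builtins.bool, which avoids noisy errors for flag-like attributes whose
# runtime value is 0 or 1 but whose stub type is bool.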
def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]:
"""Returns a mypy type object representing the type of ``runtime``.
Returns None if we can't find something that works.
"""
if runtime is None:
return mypy.types.NoneType()
if isinstance(runtime, property):
# Give up on properties to avoid issues with things that are typed as attributes.
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(
runtime,
(types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
):
builtins = get_stub("builtins")
assert builtins is not None
type_info = builtins.names["function"].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
try:
signature = inspect.signature(runtime)
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append(
None if arg.kind == inspect.Parameter.POSITIONAL_ONLY else arg.name
)
                # has_default is True when the runtime parameter declares a default value
                has_default = arg.default != inspect.Parameter.empty
                if arg.kind == inspect.Parameter.POSITIONAL_ONLY:
                    arg_kinds.append(nodes.ARG_OPT if has_default else nodes.ARG_POS)
                elif arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
                    arg_kinds.append(nodes.ARG_OPT if has_default else nodes.ARG_POS)
                elif arg.kind == inspect.Parameter.KEYWORD_ONLY:
                    arg_kinds.append(nodes.ARG_NAMED_OPT if has_default else nodes.ARG_NAMED)
elif arg.kind == inspect.Parameter.VAR_POSITIONAL:
arg_kinds.append(nodes.ARG_STAR)
elif arg.kind == inspect.Parameter.VAR_KEYWORD:
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
except ValueError:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(
arg_types,
arg_kinds,
arg_names,
ret_type=anytype(),
fallback=fallback,
is_ellipsis_args=True,
)
# Try and look up a stub for the runtime object
stub = get_stub(type(runtime).__module__)
if stub is None:
return None
type_name = type(runtime).__name__
if type_name not in stub.names:
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if not isinstance(type_info, nodes.TypeInfo):
return None
if isinstance(runtime, tuple):
# Special case tuples so we construct a valid mypy.types.TupleType
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if i is not None else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
try:
# Literals are supposed to be only bool, int, str, bytes or enums, but this seems to work
# well (when not using mypyc, for which bytes and enums are also problematic).
return mypy.types.LiteralType(
value=runtime,
fallback=fallback,
)
except TypeError:
# Ask for forgiveness if we're using mypyc.
return fallback
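# Illustrative results of the lookup above (added; shapes are approximate):
#   get_mypy_type_of_runtime_value(None)     -> NoneType
#   get_mypy_type_of_runtime_value(3)        -> Literal[3] with an int fallback
#   get_mypy_type_of_runtime_value((1, "a")) -> a TupleType of the item types
#   get_mypy_type_of_runtime_value(len)      -> a CallableType with Any arguments and return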
_all_stubs = {} # type: Dict[str, nodes.MypyFile]
def build_stubs(modules: List[str], options: Options, find_submodules: bool = False) -> List[str]:
"""Uses mypy to construct stub objects for the given modules.
This sets global state that ``get_stub`` can access.
Returns all modules we might want to check. If ``find_submodules`` is False, this is equal
to ``modules``.
:param modules: List of modules to build stubs for.
:param options: Mypy options for finding and building stubs.
:param find_submodules: Whether to attempt to find submodules of the given modules as well.
"""
data_dir = mypy.build.default_data_dir()
search_path = mypy.modulefinder.compute_search_paths([], options, data_dir)
find_module_cache = mypy.modulefinder.FindModuleCache(
search_path, fscache=None, options=options
)
all_modules = []
sources = []
for module in modules:
all_modules.append(module)
if not find_submodules:
module_path = find_module_cache.find_module(module)
if not isinstance(module_path, str):
# test_module will yield an error later when it can't find stubs
continue
sources.append(mypy.modulefinder.BuildSource(module_path, module, None))
else:
found_sources = find_module_cache.find_modules_recursive(module)
sources.extend(found_sources)
all_modules.extend(s.module for s in found_sources if s.module not in all_modules)
try:
res = mypy.build.build(sources=sources, options=options)
except mypy.errors.CompileError as e:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to failed mypy compile:\n",
str(e),
]
print("".join(output))
raise RuntimeError from e
if res.errors:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to mypy build errors:\n",
]
print("".join(output) + "\n".join(res.errors))
raise RuntimeError
global _all_stubs
_all_stubs = res.files
return all_modules
def get_stub(module: str) -> Optional[nodes.MypyFile]:
"""Returns a stub object for the given module, if we've built one."""
return _all_stubs.get(module)
def get_typeshed_stdlib_modules(custom_typeshed_dir: Optional[str]) -> List[str]:
"""Returns a list of stdlib modules in typeshed (for current Python version)."""
stdlib_py_versions = mypy.modulefinder.load_stdlib_py_versions(custom_typeshed_dir)
packages = set()
# Typeshed doesn't cover Python 3.5.
if sys.version_info < (3, 6):
version_info = (3, 6)
else:
version_info = sys.version_info[0:2]
for module, versions in stdlib_py_versions.items():
minver, maxver = versions
if version_info >= minver and (maxver is None or version_info <= maxver):
packages.add(module)
if custom_typeshed_dir:
typeshed_dir = Path(custom_typeshed_dir)
else:
typeshed_dir = Path(mypy.build.default_data_dir()) / "typeshed"
stdlib_dir = typeshed_dir / "stdlib"
modules = []
for path in stdlib_dir.rglob("*.pyi"):
if path.stem == "__init__":
path = path.parent
module = ".".join(path.relative_to(stdlib_dir).parts[:-1] + (path.stem,))
if module.split(".")[0] in packages:
modules.append(module)
return sorted(modules)
def get_allowlist_entries(allowlist_file: str) -> Iterator[str]:
def strip_comments(s: str) -> str:
try:
return s[: s.index("#")].strip()
except ValueError:
return s.strip()
with open(allowlist_file) as f:
for line in f.readlines():
entry = strip_comments(line)
if entry:
yield entry
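# Example allowlist file (added for illustration): each non-comment line is matched against
# Error.object_desc, first by exact string equality and then as a full-match regex.
#   os.path.splitext
#   some_module\.Generated_.*   # regex entries are allowed
#   # comments and blank lines are ignored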
def test_stubs(args: argparse.Namespace, use_builtins_fixtures: bool = False) -> int:
"""This is stubtest! It's time to test the stubs!"""
# Load the allowlist. This is a series of strings corresponding to Error.object_desc
# Values in the dict will store whether we used the allowlist entry or not.
allowlist = {
entry: False
for allowlist_file in args.allowlist
for entry in get_allowlist_entries(allowlist_file)
}
allowlist_regexes = {entry: re.compile(entry) for entry in allowlist}
# If we need to generate an allowlist, we store Error.object_desc for each error here.
generated_allowlist = set()
modules = args.modules
if args.check_typeshed:
assert not args.modules, "Cannot pass both --check-typeshed and a list of modules"
modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir)
annoying_modules = {"antigravity", "this"}
modules = [m for m in modules if m not in annoying_modules]
assert modules, "No modules to check"
options = Options()
options.incremental = False
options.custom_typeshed_dir = args.custom_typeshed_dir
options.config_file = args.mypy_config_file
options.use_builtins_fixtures = use_builtins_fixtures
if options.config_file:
def set_strict_flags() -> None: # not needed yet
return
parse_config_file(options, set_strict_flags, options.config_file, sys.stdout, sys.stderr)
try:
modules = build_stubs(modules, options, find_submodules=not args.check_typeshed)
except RuntimeError:
return 1
exit_code = 0
for module in modules:
for error in test_module(module):
# Filter errors
if args.ignore_missing_stub and error.is_missing_stub():
continue
if args.ignore_positional_only and error.is_positional_only_related():
continue
if error.object_desc in allowlist:
allowlist[error.object_desc] = True
continue
is_allowlisted = False
for w in allowlist:
if allowlist_regexes[w].fullmatch(error.object_desc):
allowlist[w] = True
is_allowlisted = True
break
if is_allowlisted:
continue
# We have errors, so change exit code, and output whatever necessary
exit_code = 1
if args.generate_allowlist:
generated_allowlist.add(error.object_desc)
continue
print(error.get_description(concise=args.concise))
# Print unused allowlist entries
if not args.ignore_unused_allowlist:
for w in allowlist:
# Don't consider an entry unused if it regex-matches the empty string
# This lets us allowlist errors that don't manifest at all on some systems
if not allowlist[w] and not allowlist_regexes[w].fullmatch(""):
exit_code = 1
print("note: unused allowlist entry {}".format(w))
# Print the generated allowlist
if args.generate_allowlist:
for e in sorted(generated_allowlist):
print(e)
exit_code = 0
return exit_code
def parse_options(args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Compares stubs to objects introspected from the runtime."
)
parser.add_argument("modules", nargs="*", help="Modules to test")
parser.add_argument("--concise", action="store_true", help="Make output concise")
parser.add_argument(
"--ignore-missing-stub",
action="store_true",
help="Ignore errors for stub missing things that are present at runtime",
)
parser.add_argument(
"--ignore-positional-only",
action="store_true",
help="Ignore errors for whether an argument should or shouldn't be positional-only",
)
parser.add_argument(
"--custom-typeshed-dir", metavar="DIR", help="Use the custom typeshed in DIR"
)
parser.add_argument(
"--check-typeshed", action="store_true", help="Check all stdlib modules in typeshed"
)
parser.add_argument(
"--allowlist",
"--whitelist",
action="append",
metavar="FILE",
default=[],
help=(
"Use file as an allowlist. Can be passed multiple times to combine multiple "
"allowlists. Allowlists can be created with --generate-allowlist"
),
)
parser.add_argument(
"--generate-allowlist",
"--generate-whitelist",
action="store_true",
help="Print an allowlist (to stdout) to be used with --allowlist",
)
parser.add_argument(
"--ignore-unused-allowlist",
"--ignore-unused-whitelist",
action="store_true",
help="Ignore unused allowlist entries",
)
config_group = parser.add_argument_group(
title='mypy config file',
description="Use a config file instead of command line arguments. "
"Plugins and mypy path are the only supported "
"configurations.",
)
config_group.add_argument(
'--mypy-config-file',
help=(
"An existing mypy configuration file, currently used by stubtest to help "
"determine mypy path and plugins"
),
)
return parser.parse_args(args)
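# Example invocations (added for illustration; assumes stubtest is run as a module):
#   python -m mypy.stubtest my_package --allowlist allowlist.txt
#   python -m mypy.stubtest --check-typeshed --generate-allowlist > typeshed_allowlist.txt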
def main() -> int:
mypy.util.check_python_version("stubtest")
return test_stubs(parse_options(sys.argv[1:]))
if __name__ == "__main__":
sys.exit(main())
|
def test_module(module_name: str) -> Iterator[Error]:
"""Tests a given module's stub against introspecting it at runtime.
Requires the stub to have been built already, accomplished by a call to ``build_stubs``.
:param module_name: The module to test
"""
stub = get_stub(module_name)
if stub is None:
yield Error([module_name], "failed to find stubs", MISSING, None)
return
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
runtime = importlib.import_module(module_name)
except Exception as e:
yield Error([module_name], "failed to import: {}".format(e), stub, MISSING)
return
with warnings.catch_warnings():
warnings.simplefilter("ignore")
yield from verify(stub, runtime, [module_name])
| 157 | 180 |
"""Tests for stubs.
Verify that various things in stubs are consistent with how things behave at runtime.
"""
import argparse
import copy
import enum
import importlib
import inspect
import re
import sys
import types
import warnings
from functools import singledispatch
from pathlib import Path
from typing import Any, Dict, Generic, Iterator, List, Optional, Tuple, TypeVar, Union, cast
from typing_extensions import Type
import mypy.build
import mypy.modulefinder
import mypy.types
from mypy import nodes
from mypy.config_parser import parse_config_file
from mypy.options import Options
from mypy.util import FancyFormatter
class Missing:
"""Marker object for things that are missing (from a stub or the runtime)."""
def __repr__(self) -> str:
return "MISSING"
MISSING = Missing()
T = TypeVar("T")
if sys.version_info >= (3, 5, 3):
MaybeMissing = Union[T, Missing]
else:
# work around a bug in 3.5.2 and earlier's typing.py
class MaybeMissingMeta(type):
def __getitem__(self, arg: Any) -> Any:
return Union[arg, Missing]
class MaybeMissing(metaclass=MaybeMissingMeta): # type: ignore
pass
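# Usage note (added): MaybeMissing[T] reads as "either a T or the MISSING marker object", e.g.
# a parameter typed MaybeMissing[types.ModuleType] is either an imported module or MISSING.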
_formatter = FancyFormatter(sys.stdout, sys.stderr, False)
def _style(message: str, **kwargs: Any) -> str:
"""Wrapper around mypy.util for fancy formatting."""
kwargs.setdefault("color", "none")
return _formatter.style(message, **kwargs)
class Error:
def __init__(
self,
object_path: List[str],
message: str,
stub_object: MaybeMissing[nodes.Node],
runtime_object: MaybeMissing[Any],
*,
stub_desc: Optional[str] = None,
runtime_desc: Optional[str] = None
) -> None:
"""Represents an error found by stubtest.
:param object_path: Location of the object with the error,
e.g. ``["module", "Class", "method"]``
:param message: Error message
:param stub_object: The mypy node representing the stub
:param runtime_object: Actual object obtained from the runtime
:param stub_desc: Specialised description for the stub object, should you wish
:param runtime_desc: Specialised description for the runtime object, should you wish
"""
self.object_desc = ".".join(object_path)
self.message = message
self.stub_object = stub_object
self.runtime_object = runtime_object
self.stub_desc = stub_desc or str(getattr(stub_object, "type", stub_object))
self.runtime_desc = runtime_desc or str(runtime_object)
def is_missing_stub(self) -> bool:
"""Whether or not the error is for something missing from the stub."""
return isinstance(self.stub_object, Missing)
def is_positional_only_related(self) -> bool:
"""Whether or not the error is for something being (or not being) positional-only."""
# TODO: This is hacky, use error codes or something more resilient
return "leading double underscore" in self.message
def get_description(self, concise: bool = False) -> str:
"""Returns a description of the error.
:param concise: Whether to return a concise, one-line description
"""
if concise:
return _style(self.object_desc, bold=True) + " " + self.message
stub_line = None
stub_file = None # type: None
if not isinstance(self.stub_object, Missing):
stub_line = self.stub_object.line
# TODO: Find a way of getting the stub file
stub_loc_str = ""
if stub_line:
stub_loc_str += " at line {}".format(stub_line)
if stub_file:
stub_loc_str += " in file {}".format(Path(stub_file))
runtime_line = None
runtime_file = None
if not isinstance(self.runtime_object, Missing):
try:
runtime_line = inspect.getsourcelines(self.runtime_object)[1]
except (OSError, TypeError):
pass
try:
runtime_file = inspect.getsourcefile(self.runtime_object)
except TypeError:
pass
runtime_loc_str = ""
if runtime_line:
runtime_loc_str += " at line {}".format(runtime_line)
if runtime_file:
runtime_loc_str += " in file {}".format(Path(runtime_file))
output = [
_style("error: ", color="red", bold=True),
_style(self.object_desc, bold=True),
" ",
self.message,
"\n",
"Stub:",
_style(stub_loc_str, dim=True),
"\n",
_style(self.stub_desc + "\n", color="blue", dim=True),
"Runtime:",
_style(runtime_loc_str, dim=True),
"\n",
_style(self.runtime_desc + "\n", color="blue", dim=True),
]
return "".join(output)
def test_module(module_name: str) -> Iterator[Error]:
"""Tests a given module's stub against introspecting it at runtime.
Requires the stub to have been built already, accomplished by a call to ``build_stubs``.
:param module_name: The module to test
"""
stub = get_stub(module_name)
if stub is None:
yield Error([module_name], "failed to find stubs", MISSING, None)
return
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
runtime = importlib.import_module(module_name)
except Exception as e:
yield Error([module_name], "failed to import: {}".format(e), stub, MISSING)
return
with warnings.catch_warnings():
warnings.simplefilter("ignore")
yield from verify(stub, runtime, [module_name])
@singledispatch
def verify(
stub: nodes.Node, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
"""Entry point for comparing a stub to a runtime object.
We use single dispatch based on the type of ``stub``.
:param stub: The mypy node representing a part of the stub
:param runtime: The runtime object corresponding to ``stub``
"""
yield Error(object_path, "is an unknown mypy node", stub, runtime)
@verify.register(nodes.MypyFile)
def verify_mypyfile(
stub: nodes.MypyFile, runtime: MaybeMissing[types.ModuleType], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if not isinstance(runtime, types.ModuleType):
yield Error(object_path, "is not a module", stub, runtime)
return
# Check things in the stub that are public
to_check = set(
m
for m, o in stub.names.items()
if o.module_public and (not m.startswith("_") or hasattr(runtime, m))
)
runtime_public_contents = [
m
for m in dir(runtime)
if not m.startswith("_")
# Ensure that the object's module is `runtime`, e.g. so that we don't pick up reexported
# modules and infinitely recurse. Unfortunately, there's no way to detect an explicit
# reexport missing from the stubs (that isn't specified in __all__)
and getattr(getattr(runtime, m), "__module__", None) == runtime.__name__
]
# Check all things declared in module's __all__, falling back to runtime_public_contents
to_check.update(getattr(runtime, "__all__", runtime_public_contents))
to_check.difference_update({"__file__", "__doc__", "__name__", "__builtins__", "__package__"})
for entry in sorted(to_check):
yield from verify(
stub.names[entry].node if entry in stub.names else MISSING,
getattr(runtime, entry, MISSING),
object_path + [entry],
)
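# Note (added for clarity): the membership logic above is intentionally asymmetric. Underscore
# names from the stub are only checked when they also exist at runtime, and runtime names are
# taken from __all__ when present, falling back to public names defined in this very module so
# that re-exported modules are not recursed into.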
@verify.register(nodes.TypeInfo)
def verify_typeinfo(
stub: nodes.TypeInfo, runtime: MaybeMissing[Type[Any]], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime, stub_desc=repr(stub))
return
if not isinstance(runtime, type):
yield Error(object_path, "is not a type", stub, runtime, stub_desc=repr(stub))
return
# Check everything already defined in the stub
to_check = set(stub.names)
# There's a reasonable case to be made that we should always check all dunders, but it's
# currently quite noisy. We could turn this into a denylist instead of an allowlist.
to_check.update(
# cast to workaround mypyc complaints
m for m in cast(Any, vars)(runtime) if not m.startswith("_") or m in SPECIAL_DUNDERS
)
for entry in sorted(to_check):
mangled_entry = entry
if entry.startswith("__") and not entry.endswith("__"):
mangled_entry = "_{}{}".format(stub.name, entry)
yield from verify(
next((t.names[entry].node for t in stub.mro if entry in t.names), MISSING),
getattr(runtime, mangled_entry, MISSING),
object_path + [entry],
)
def _verify_static_class_methods(
stub: nodes.FuncBase, runtime: Any, object_path: List[str]
) -> Iterator[str]:
if stub.name in ("__new__", "__init_subclass__", "__class_getitem__"):
# Special cased by Python, so don't bother checking
return
if inspect.isbuiltin(runtime):
# The isinstance checks don't work reliably for builtins, e.g. datetime.datetime.now, so do
# something a little hacky that seems to work well
probably_class_method = isinstance(getattr(runtime, "__self__", None), type)
if probably_class_method and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not probably_class_method and stub.is_class:
yield "stub is a classmethod but runtime is not"
return
# Look the object up statically, to avoid binding by the descriptor protocol
static_runtime = importlib.import_module(object_path[0])
for entry in object_path[1:]:
try:
static_runtime = inspect.getattr_static(static_runtime, entry)
except AttributeError:
# This can happen with mangled names, ignore for now.
# TODO: pass more information about ancestors of nodes/objects to verify, so we don't
# have to do this hacky lookup. Would be useful in a couple other places too.
return
if isinstance(static_runtime, classmethod) and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not isinstance(static_runtime, classmethod) and stub.is_class:
yield "stub is a classmethod but runtime is not"
if isinstance(static_runtime, staticmethod) and not stub.is_static:
yield "runtime is a staticmethod but stub is not"
if not isinstance(static_runtime, staticmethod) and stub.is_static:
yield "stub is a staticmethod but runtime is not"
def _verify_arg_name(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter, function_name: str
) -> Iterator[str]:
"""Checks whether argument names match."""
# Ignore exact names for most dunder methods
if is_dunder(function_name, exclude_special=True):
return
def strip_prefix(s: str, prefix: str) -> str:
return s[len(prefix):] if s.startswith(prefix) else s
if strip_prefix(stub_arg.variable.name, "__") == runtime_arg.name:
return
def names_approx_match(a: str, b: str) -> bool:
a = a.strip("_")
b = b.strip("_")
return a.startswith(b) or b.startswith(a) or len(a) == 1 or len(b) == 1
# Be more permissive about names matching for positional-only arguments
if runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY and names_approx_match(
stub_arg.variable.name, runtime_arg.name
):
return
# This comes up with namedtuples, so ignore
if stub_arg.variable.name == "_self":
return
yield (
'stub argument "{}" differs from runtime argument "{}"'.format(
stub_arg.variable.name, runtime_arg.name
)
)
def _verify_arg_default_value(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter
) -> Iterator[str]:
"""Checks whether argument default values are compatible."""
if runtime_arg.default != inspect.Parameter.empty:
if stub_arg.kind not in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'runtime argument "{}" has a default value but stub argument does not'.format(
runtime_arg.name
)
)
else:
runtime_type = get_mypy_type_of_runtime_value(runtime_arg.default)
# Fallback to the type annotation type if var type is missing. The type annotation
# is an UnboundType, but I don't know enough to know what the pros and cons here are.
# UnboundTypes have ugly question marks following them, so default to var type.
# Note we do this same fallback when constructing signatures in from_overloadedfuncdef
stub_type = stub_arg.variable.type or stub_arg.type_annotation
if isinstance(stub_type, mypy.types.TypeVarType):
stub_type = stub_type.upper_bound
if (
runtime_type is not None
and stub_type is not None
# Avoid false positives for marker objects
and type(runtime_arg.default) != object
and not is_subtype_helper(runtime_type, stub_type)
):
yield (
'runtime argument "{}" has a default value of type {}, '
"which is incompatible with stub argument type {}".format(
runtime_arg.name, runtime_type, stub_type
)
)
else:
if stub_arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'stub argument "{}" has a default value but runtime argument does not'.format(
stub_arg.variable.name
)
)
def maybe_strip_cls(name: str, args: List[nodes.Argument]) -> List[nodes.Argument]:
if name in ("__init_subclass__", "__class_getitem__"):
# These are implicitly classmethods. If the stub chooses not to have @classmethod, we
# should remove the cls argument
if args[0].variable.name == "cls":
return args[1:]
return args
class Signature(Generic[T]):
def __init__(self) -> None:
self.pos = [] # type: List[T]
self.kwonly = {} # type: Dict[str, T]
self.varpos = None # type: Optional[T]
self.varkw = None # type: Optional[T]
def __str__(self) -> str:
def get_name(arg: Any) -> str:
if isinstance(arg, inspect.Parameter):
return arg.name
if isinstance(arg, nodes.Argument):
return arg.variable.name
raise AssertionError
def get_type(arg: Any) -> Optional[str]:
if isinstance(arg, inspect.Parameter):
return None
if isinstance(arg, nodes.Argument):
return str(arg.variable.type or arg.type_annotation)
raise AssertionError
def has_default(arg: Any) -> bool:
if isinstance(arg, inspect.Parameter):
return arg.default != inspect.Parameter.empty
if isinstance(arg, nodes.Argument):
return arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT)
raise AssertionError
def get_desc(arg: Any) -> str:
arg_type = get_type(arg)
return (
get_name(arg)
+ (": {}".format(arg_type) if arg_type else "")
+ (" = ..." if has_default(arg) else "")
)
kw_only = sorted(self.kwonly.values(), key=lambda a: (has_default(a), get_name(a)))
ret = "def ("
ret += ", ".join(
[get_desc(arg) for arg in self.pos]
+ (["*" + get_name(self.varpos)] if self.varpos else (["*"] if self.kwonly else []))
+ [get_desc(arg) for arg in kw_only]
+ (["**" + get_name(self.varkw)] if self.varkw else [])
)
ret += ")"
return ret
@staticmethod
def from_funcitem(stub: nodes.FuncItem) -> "Signature[nodes.Argument]":
stub_sig = Signature() # type: Signature[nodes.Argument]
stub_args = maybe_strip_cls(stub.name, stub.arguments)
for stub_arg in stub_args:
if stub_arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
stub_sig.pos.append(stub_arg)
elif stub_arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
stub_sig.kwonly[stub_arg.variable.name] = stub_arg
elif stub_arg.kind == nodes.ARG_STAR:
stub_sig.varpos = stub_arg
elif stub_arg.kind == nodes.ARG_STAR2:
stub_sig.varkw = stub_arg
else:
raise AssertionError
return stub_sig
@staticmethod
def from_inspect_signature(signature: inspect.Signature) -> "Signature[inspect.Parameter]":
runtime_sig = Signature() # type: Signature[inspect.Parameter]
for runtime_arg in signature.parameters.values():
if runtime_arg.kind in (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
):
runtime_sig.pos.append(runtime_arg)
elif runtime_arg.kind == inspect.Parameter.KEYWORD_ONLY:
runtime_sig.kwonly[runtime_arg.name] = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_POSITIONAL:
runtime_sig.varpos = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_KEYWORD:
runtime_sig.varkw = runtime_arg
else:
raise AssertionError
return runtime_sig
@staticmethod
def from_overloadedfuncdef(stub: nodes.OverloadedFuncDef) -> "Signature[nodes.Argument]":
"""Returns a Signature from an OverloadedFuncDef.
If life were simple, to verify_overloadedfuncdef, we'd just verify_funcitem for each of its
items. Unfortunately, life isn't simple and overloads are pretty deceitful. So instead, we
try and combine the overload's items into a single signature that is compatible with any
lies it might try to tell.
"""
# For most dunder methods, just assume all args are positional-only
assume_positional_only = is_dunder(stub.name, exclude_special=True)
all_args = {} # type: Dict[str, List[Tuple[nodes.Argument, int]]]
for func in map(_resolve_funcitem_from_decorator, stub.items):
assert func is not None
args = maybe_strip_cls(stub.name, func.arguments)
for index, arg in enumerate(args):
# For positional-only args, we allow overloads to have different names for the same
# argument. To accomplish this, we just make up a fake index-based name.
name = (
"__{}".format(index)
if arg.variable.name.startswith("__") or assume_positional_only
else arg.variable.name
)
all_args.setdefault(name, []).append((arg, index))
def get_position(arg_name: str) -> int:
# We just need this to return the positional args in the correct order.
return max(index for _, index in all_args[arg_name])
def get_type(arg_name: str) -> mypy.types.ProperType:
with mypy.state.strict_optional_set(True):
all_types = [
arg.variable.type or arg.type_annotation for arg, _ in all_args[arg_name]
]
return mypy.typeops.make_simplified_union([t for t in all_types if t])
def get_kind(arg_name: str) -> int:
kinds = {arg.kind for arg, _ in all_args[arg_name]}
if nodes.ARG_STAR in kinds:
return nodes.ARG_STAR
if nodes.ARG_STAR2 in kinds:
return nodes.ARG_STAR2
# The logic here is based on two tenets:
# 1) If an arg is ever optional (or unspecified), it is optional
# 2) If an arg is ever positional, it is positional
is_opt = (
len(all_args[arg_name]) < len(stub.items)
or nodes.ARG_OPT in kinds
or nodes.ARG_NAMED_OPT in kinds
)
is_pos = nodes.ARG_OPT in kinds or nodes.ARG_POS in kinds
if is_opt:
return nodes.ARG_OPT if is_pos else nodes.ARG_NAMED_OPT
return nodes.ARG_POS if is_pos else nodes.ARG_NAMED
sig = Signature() # type: Signature[nodes.Argument]
for arg_name in sorted(all_args, key=get_position):
# example_arg_name gives us a real name (in case we had a fake index-based name)
example_arg_name = all_args[arg_name][0][0].variable.name
arg = nodes.Argument(
nodes.Var(example_arg_name, get_type(arg_name)),
type_annotation=None,
initializer=None,
kind=get_kind(arg_name),
)
if arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
sig.pos.append(arg)
elif arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
sig.kwonly[arg.variable.name] = arg
elif arg.kind == nodes.ARG_STAR:
sig.varpos = arg
elif arg.kind == nodes.ARG_STAR2:
sig.varkw = arg
else:
raise AssertionError
return sig
def _verify_signature(
stub: Signature[nodes.Argument], runtime: Signature[inspect.Parameter], function_name: str
) -> Iterator[str]:
# Check positional arguments match up
for stub_arg, runtime_arg in zip(stub.pos, runtime.pos):
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
if (
runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY
and not stub_arg.variable.name.startswith("__")
and not stub_arg.variable.name.strip("_") == "self"
and not is_dunder(function_name, exclude_special=True) # noisy for dunder methods
):
yield (
'stub argument "{}" should be positional-only '
'(rename with a leading double underscore, i.e. "__{}")'.format(
stub_arg.variable.name, runtime_arg.name
)
)
if (
runtime_arg.kind != inspect.Parameter.POSITIONAL_ONLY
and stub_arg.variable.name.startswith("__")
):
yield (
'stub argument "{}" should be positional or keyword '
"(remove leading double underscore)".format(stub_arg.variable.name)
)
# Check unmatched positional args
if len(stub.pos) > len(runtime.pos):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *args. Hence, a) we can't check that the runtime actually takes those
# parameters and b) below, we don't enforce that the stub takes *args, since runtime logic
# may prevent those arguments from actually being accepted.
if runtime.varpos is None:
for stub_arg in stub.pos[len(runtime.pos):]:
# If the variable is in runtime.kwonly, it's just mislabelled as not a
# keyword-only argument
if stub_arg.variable.name not in runtime.kwonly:
yield 'runtime does not have argument "{}"'.format(stub_arg.variable.name)
else:
yield 'stub argument "{}" is not keyword-only'.format(stub_arg.variable.name)
if stub.varpos is not None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
elif len(stub.pos) < len(runtime.pos):
for runtime_arg in runtime.pos[len(stub.pos):]:
if runtime_arg.name not in stub.kwonly:
yield 'stub does not have argument "{}"'.format(runtime_arg.name)
else:
yield 'runtime argument "{}" is not keyword-only'.format(runtime_arg.name)
# Checks involving *args
if len(stub.pos) <= len(runtime.pos) or runtime.varpos is None:
if stub.varpos is None and runtime.varpos is not None:
yield 'stub does not have *args argument "{}"'.format(runtime.varpos.name)
if stub.varpos is not None and runtime.varpos is None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
# Check keyword-only args
for arg in sorted(set(stub.kwonly) & set(runtime.kwonly)):
stub_arg, runtime_arg = stub.kwonly[arg], runtime.kwonly[arg]
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
# Check unmatched keyword-only args
if runtime.varkw is None or not set(runtime.kwonly).issubset(set(stub.kwonly)):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *kwargs. Hence, a) we only check if the runtime actually takes those
# parameters when the above condition holds and b) below, we don't enforce that the stub
# takes *kwargs, since runtime logic may prevent additional arguments from actually being
# accepted.
for arg in sorted(set(stub.kwonly) - set(runtime.kwonly)):
yield 'runtime does not have argument "{}"'.format(arg)
for arg in sorted(set(runtime.kwonly) - set(stub.kwonly)):
if arg in set(stub_arg.variable.name for stub_arg in stub.pos):
# Don't report this if we've reported it before
if len(stub.pos) > len(runtime.pos) and runtime.varpos is not None:
yield 'stub argument "{}" is not keyword-only'.format(arg)
else:
yield 'stub does not have argument "{}"'.format(arg)
# Checks involving **kwargs
if stub.varkw is None and runtime.varkw is not None:
# As mentioned above, don't enforce that the stub takes **kwargs.
# Also check against positional parameters, to avoid a nitpicky message when an argument
# isn't marked as keyword-only
stub_pos_names = set(stub_arg.variable.name for stub_arg in stub.pos)
# Ideally we'd do a strict subset check, but in practice the errors from that aren't useful
if not set(runtime.kwonly).issubset(set(stub.kwonly) | stub_pos_names):
yield 'stub does not have **kwargs argument "{}"'.format(runtime.varkw.name)
if stub.varkw is not None and runtime.varkw is None:
yield 'runtime does not have **kwargs argument "{}"'.format(stub.varkw.variable.name)
@verify.register(nodes.FuncItem)
def verify_funcitem(
stub: nodes.FuncItem, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except (ValueError, RuntimeError):
# inspect.signature throws sometimes
# catch RuntimeError because of https://bugs.python.org/issue39504
return
stub_sig = Signature.from_funcitem(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
runtime_desc="def " + str(signature),
)
@verify.register(Missing)
def verify_none(
stub: Missing, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
yield Error(object_path, "is not present in stub", stub, runtime)
@verify.register(nodes.Var)
def verify_var(
stub: nodes.Var, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
# Don't always yield an error here, because we often can't find instance variables
if len(object_path) <= 2:
yield Error(object_path, "is not present at runtime", stub, runtime)
return
runtime_type = get_mypy_type_of_runtime_value(runtime)
if (
runtime_type is not None
and stub.type is not None
and not is_subtype_helper(runtime_type, stub.type)
):
should_error = True
# Avoid errors when defining enums, since runtime_type is the enum itself, but we'd
# annotate it with the type of runtime.value
if isinstance(runtime, enum.Enum):
runtime_type = get_mypy_type_of_runtime_value(runtime.value)
if runtime_type is not None and is_subtype_helper(runtime_type, stub.type):
should_error = False
if should_error:
yield Error(
object_path,
"variable differs from runtime type {}".format(runtime_type),
stub,
runtime,
)
@verify.register(nodes.OverloadedFuncDef)
def verify_overloadedfuncdef(
stub: nodes.OverloadedFuncDef, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.is_property:
# We get here in cases of overloads from property.setter
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except ValueError:
return
stub_sig = Signature.from_overloadedfuncdef(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
# TODO: This is a little hacky, but the addition here is super useful
if "has a default value of type" in message:
message += (
". This is often caused by overloads failing to account for explicitly passing "
"in the default value."
)
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
stub_desc=str(stub.type) + "\nInferred signature: {}".format(stub_sig),
runtime_desc="def " + str(signature),
)
@verify.register(nodes.TypeVarExpr)
def verify_typevarexpr(
stub: nodes.TypeVarExpr, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
    # At runtime this yields nothing; the unreachable `yield` just makes the function a
    # generator, matching the Iterator[Error] signature expected by verify().
    if False:
        yield None
def _verify_property(stub: nodes.Decorator, runtime: Any) -> Iterator[str]:
assert stub.func.is_property
if isinstance(runtime, property):
return
if inspect.isdatadescriptor(runtime):
# It's enough like a property...
return
# Sometimes attributes pretend to be properties, for instance, to express that they
# are read only. So allowlist if runtime_type matches the return type of stub.
runtime_type = get_mypy_type_of_runtime_value(runtime)
func_type = (
stub.func.type.ret_type if isinstance(stub.func.type, mypy.types.CallableType) else None
)
if (
runtime_type is not None
and func_type is not None
and is_subtype_helper(runtime_type, func_type)
):
return
yield "is inconsistent, cannot reconcile @property on stub with runtime object"
def _resolve_funcitem_from_decorator(dec: nodes.OverloadPart) -> Optional[nodes.FuncItem]:
"""Returns a FuncItem that corresponds to the output of the decorator.
Returns None if we can't figure out what that would be. For convenience, this function also
accepts FuncItems.
"""
if isinstance(dec, nodes.FuncItem):
return dec
if dec.func.is_property:
return None
def apply_decorator_to_funcitem(
decorator: nodes.Expression, func: nodes.FuncItem
) -> Optional[nodes.FuncItem]:
if not isinstance(decorator, nodes.RefExpr):
return None
if decorator.fullname is None:
# Happens with namedtuple
return None
if decorator.fullname in (
"builtins.staticmethod",
"typing.overload",
"abc.abstractmethod",
):
return func
if decorator.fullname == "builtins.classmethod":
assert func.arguments[0].variable.name in ("cls", "metacls")
ret = copy.copy(func)
# Remove the cls argument, since it's not present in inspect.signature of classmethods
ret.arguments = ret.arguments[1:]
return ret
# Just give up on any other decorators. After excluding properties, we don't run into
# anything else when running on typeshed's stdlib.
return None
func = dec.func # type: nodes.FuncItem
for decorator in dec.original_decorators:
resulting_func = apply_decorator_to_funcitem(decorator, func)
if resulting_func is None:
return None
func = resulting_func
return func
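# Editor's note: illustrative sketch (not part of upstream stubtest; the class and helper
# names are made up). The cls-stripping above mirrors how inspect reports classmethods at
# runtime: accessing the method through the class already hides `cls`, so the stub-side
# FuncItem must drop it too for the signatures to line up.
def _example_classmethod_runtime_signature() -> inspect.Signature:
    class Widget:  # hypothetical class used only for illustration
        @classmethod
        def make(cls, size: int) -> "Widget":
            return cls()
    # inspect.signature of the classmethod accessed via the class shows only (size: int)
    return inspect.signature(Widget.make)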
@verify.register(nodes.Decorator)
def verify_decorator(
stub: nodes.Decorator, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.func.is_property:
for message in _verify_property(stub, runtime):
yield Error(object_path, message, stub, runtime)
return
func = _resolve_funcitem_from_decorator(stub)
if func is not None:
yield from verify(func, runtime, object_path)
@verify.register(nodes.TypeAlias)
def verify_typealias(
stub: nodes.TypeAlias, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
    # At runtime this yields nothing; the unreachable `yield` just makes the function a
    # generator, matching the Iterator[Error] signature expected by verify().
    if False:
        yield None
SPECIAL_DUNDERS = ("__init__", "__new__", "__call__", "__init_subclass__", "__class_getitem__")
def is_dunder(name: str, exclude_special: bool = False) -> bool:
"""Returns whether name is a dunder name.
:param exclude_special: Whether to return False for a couple special dunder methods.
"""
if exclude_special and name in SPECIAL_DUNDERS:
return False
return name.startswith("__") and name.endswith("__")
def is_subtype_helper(left: mypy.types.Type, right: mypy.types.Type) -> bool:
"""Checks whether ``left`` is a subtype of ``right``."""
left = mypy.types.get_proper_type(left)
right = mypy.types.get_proper_type(right)
if (
isinstance(left, mypy.types.LiteralType)
and isinstance(left.value, int)
and left.value in (0, 1)
and isinstance(right, mypy.types.Instance)
and right.type.fullname == "builtins.bool"
):
# Pretend Literal[0, 1] is a subtype of bool to avoid unhelpful errors.
return True
with mypy.state.strict_optional_set(True):
return mypy.subtypes.is_subtype(left, right)
def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]:
"""Returns a mypy type object representing the type of ``runtime``.
Returns None if we can't find something that works.
"""
if runtime is None:
return mypy.types.NoneType()
if isinstance(runtime, property):
# Give up on properties to avoid issues with things that are typed as attributes.
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(
runtime,
(types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
):
builtins = get_stub("builtins")
assert builtins is not None
type_info = builtins.names["function"].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
try:
signature = inspect.signature(runtime)
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append(
None if arg.kind == inspect.Parameter.POSITIONAL_ONLY else arg.name
)
                # True when the parameter carries a real default (inspect uses `empty` as a sentinel)
                has_default = arg.default != inspect.Parameter.empty
                if arg.kind == inspect.Parameter.POSITIONAL_ONLY:
                    arg_kinds.append(nodes.ARG_OPT if has_default else nodes.ARG_POS)
                elif arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
                    arg_kinds.append(nodes.ARG_OPT if has_default else nodes.ARG_POS)
                elif arg.kind == inspect.Parameter.KEYWORD_ONLY:
                    arg_kinds.append(nodes.ARG_NAMED_OPT if has_default else nodes.ARG_NAMED)
elif arg.kind == inspect.Parameter.VAR_POSITIONAL:
arg_kinds.append(nodes.ARG_STAR)
elif arg.kind == inspect.Parameter.VAR_KEYWORD:
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
except ValueError:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(
arg_types,
arg_kinds,
arg_names,
ret_type=anytype(),
fallback=fallback,
is_ellipsis_args=True,
)
# Try and look up a stub for the runtime object
stub = get_stub(type(runtime).__module__)
if stub is None:
return None
type_name = type(runtime).__name__
if type_name not in stub.names:
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if not isinstance(type_info, nodes.TypeInfo):
return None
if isinstance(runtime, tuple):
# Special case tuples so we construct a valid mypy.types.TupleType
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if i is not None else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
try:
# Literals are supposed to be only bool, int, str, bytes or enums, but this seems to work
# well (when not using mypyc, for which bytes and enums are also problematic).
return mypy.types.LiteralType(
value=runtime,
fallback=fallback,
)
except TypeError:
# Ask for forgiveness if we're using mypyc.
return fallback
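# Editor's note: a rough probe (hypothetical helper, not part of upstream stubtest).
# Once build_stubs() below has populated the stub cache, the function above maps plain
# runtime values to mypy types, e.g. None -> NoneType, small ints/strs -> Literal types
# with the matching Instance fallback, tuples -> TupleType, and functions -> a permissive
# CallableType with Any arguments.
def _example_probe_runtime_types() -> None:
    for value in (None, 3, "x", (1, "a"), len):  # sample values only
        print(type(value).__name__, "->", get_mypy_type_of_runtime_value(value))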
_all_stubs = {} # type: Dict[str, nodes.MypyFile]
def build_stubs(modules: List[str], options: Options, find_submodules: bool = False) -> List[str]:
"""Uses mypy to construct stub objects for the given modules.
This sets global state that ``get_stub`` can access.
Returns all modules we might want to check. If ``find_submodules`` is False, this is equal
to ``modules``.
:param modules: List of modules to build stubs for.
:param options: Mypy options for finding and building stubs.
:param find_submodules: Whether to attempt to find submodules of the given modules as well.
"""
data_dir = mypy.build.default_data_dir()
search_path = mypy.modulefinder.compute_search_paths([], options, data_dir)
find_module_cache = mypy.modulefinder.FindModuleCache(
search_path, fscache=None, options=options
)
all_modules = []
sources = []
for module in modules:
all_modules.append(module)
if not find_submodules:
module_path = find_module_cache.find_module(module)
if not isinstance(module_path, str):
# test_module will yield an error later when it can't find stubs
continue
sources.append(mypy.modulefinder.BuildSource(module_path, module, None))
else:
found_sources = find_module_cache.find_modules_recursive(module)
sources.extend(found_sources)
all_modules.extend(s.module for s in found_sources if s.module not in all_modules)
try:
res = mypy.build.build(sources=sources, options=options)
except mypy.errors.CompileError as e:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to failed mypy compile:\n",
str(e),
]
print("".join(output))
raise RuntimeError from e
if res.errors:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to mypy build errors:\n",
]
print("".join(output) + "\n".join(res.errors))
raise RuntimeError
global _all_stubs
_all_stubs = res.files
return all_modules
def get_stub(module: str) -> Optional[nodes.MypyFile]:
"""Returns a stub object for the given module, if we've built one."""
return _all_stubs.get(module)
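# Editor's note: hypothetical end-to-end sketch tying build_stubs/get_stub/test_module
# together; the module name and option values are placeholders, and a real run may need
# extra mypy configuration (typeshed location, plugins) to succeed.
def _example_check_one_module(module_name: str = "json") -> int:
    options = Options()
    options.incremental = False
    build_stubs([module_name], options, find_submodules=False)
    return sum(1 for _ in test_module(module_name))  # number of reported errors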
def get_typeshed_stdlib_modules(custom_typeshed_dir: Optional[str]) -> List[str]:
"""Returns a list of stdlib modules in typeshed (for current Python version)."""
stdlib_py_versions = mypy.modulefinder.load_stdlib_py_versions(custom_typeshed_dir)
packages = set()
# Typeshed doesn't cover Python 3.5.
if sys.version_info < (3, 6):
version_info = (3, 6)
else:
version_info = sys.version_info[0:2]
for module, versions in stdlib_py_versions.items():
minver, maxver = versions
if version_info >= minver and (maxver is None or version_info <= maxver):
packages.add(module)
if custom_typeshed_dir:
typeshed_dir = Path(custom_typeshed_dir)
else:
typeshed_dir = Path(mypy.build.default_data_dir()) / "typeshed"
stdlib_dir = typeshed_dir / "stdlib"
modules = []
for path in stdlib_dir.rglob("*.pyi"):
if path.stem == "__init__":
path = path.parent
module = ".".join(path.relative_to(stdlib_dir).parts[:-1] + (path.stem,))
if module.split(".")[0] in packages:
modules.append(module)
return sorted(modules)
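# Editor's note: illustrative helper (not in upstream) showing how a .pyi path maps to a
# module name in the rglob loop above, e.g. "stdlib/os/path.pyi" -> "os.path".
def _example_stub_path_to_module() -> str:
    from pathlib import PurePosixPath  # pure path so this runs the same on any OS
    stdlib_dir = PurePosixPath("stdlib")
    path = PurePosixPath("stdlib/os/path.pyi")
    return ".".join(path.relative_to(stdlib_dir).parts[:-1] + (path.stem,))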
def get_allowlist_entries(allowlist_file: str) -> Iterator[str]:
def strip_comments(s: str) -> str:
try:
return s[: s.index("#")].strip()
except ValueError:
return s.strip()
with open(allowlist_file) as f:
for line in f.readlines():
entry = strip_comments(line)
if entry:
yield entry
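# Editor's note: the allowlist format parsed above is one entry per line; an entry is an
# Error.object_desc (or a regex that fully matches one) and '#' starts a comment, e.g.
#     os.path.commonpath        # plain entry
#     collections\..*           # regex entries are allowed too
# A hypothetical round trip through the parser (the file path is a placeholder):
def _example_read_allowlist(path: str = "allowlist.txt") -> List[str]:
    with open(path, "w") as f:
        f.write("os.path.commonpath  # comment\n\nfoo.bar\n")
    return list(get_allowlist_entries(path))  # ["os.path.commonpath", "foo.bar"]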
def test_stubs(args: argparse.Namespace, use_builtins_fixtures: bool = False) -> int:
"""This is stubtest! It's time to test the stubs!"""
# Load the allowlist. This is a series of strings corresponding to Error.object_desc
# Values in the dict will store whether we used the allowlist entry or not.
allowlist = {
entry: False
for allowlist_file in args.allowlist
for entry in get_allowlist_entries(allowlist_file)
}
allowlist_regexes = {entry: re.compile(entry) for entry in allowlist}
# If we need to generate an allowlist, we store Error.object_desc for each error here.
generated_allowlist = set()
modules = args.modules
if args.check_typeshed:
assert not args.modules, "Cannot pass both --check-typeshed and a list of modules"
modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir)
annoying_modules = {"antigravity", "this"}
modules = [m for m in modules if m not in annoying_modules]
assert modules, "No modules to check"
options = Options()
options.incremental = False
options.custom_typeshed_dir = args.custom_typeshed_dir
options.config_file = args.mypy_config_file
options.use_builtins_fixtures = use_builtins_fixtures
if options.config_file:
def set_strict_flags() -> None: # not needed yet
return
parse_config_file(options, set_strict_flags, options.config_file, sys.stdout, sys.stderr)
try:
modules = build_stubs(modules, options, find_submodules=not args.check_typeshed)
except RuntimeError:
return 1
exit_code = 0
for module in modules:
for error in test_module(module):
# Filter errors
if args.ignore_missing_stub and error.is_missing_stub():
continue
if args.ignore_positional_only and error.is_positional_only_related():
continue
if error.object_desc in allowlist:
allowlist[error.object_desc] = True
continue
is_allowlisted = False
for w in allowlist:
if allowlist_regexes[w].fullmatch(error.object_desc):
allowlist[w] = True
is_allowlisted = True
break
if is_allowlisted:
continue
# We have errors, so change exit code, and output whatever necessary
exit_code = 1
if args.generate_allowlist:
generated_allowlist.add(error.object_desc)
continue
print(error.get_description(concise=args.concise))
# Print unused allowlist entries
if not args.ignore_unused_allowlist:
for w in allowlist:
# Don't consider an entry unused if it regex-matches the empty string
# This lets us allowlist errors that don't manifest at all on some systems
if not allowlist[w] and not allowlist_regexes[w].fullmatch(""):
exit_code = 1
print("note: unused allowlist entry {}".format(w))
# Print the generated allowlist
if args.generate_allowlist:
for e in sorted(generated_allowlist):
print(e)
exit_code = 0
return exit_code
def parse_options(args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Compares stubs to objects introspected from the runtime."
)
parser.add_argument("modules", nargs="*", help="Modules to test")
parser.add_argument("--concise", action="store_true", help="Make output concise")
parser.add_argument(
"--ignore-missing-stub",
action="store_true",
help="Ignore errors for stub missing things that are present at runtime",
)
parser.add_argument(
"--ignore-positional-only",
action="store_true",
help="Ignore errors for whether an argument should or shouldn't be positional-only",
)
parser.add_argument(
"--custom-typeshed-dir", metavar="DIR", help="Use the custom typeshed in DIR"
)
parser.add_argument(
"--check-typeshed", action="store_true", help="Check all stdlib modules in typeshed"
)
parser.add_argument(
"--allowlist",
"--whitelist",
action="append",
metavar="FILE",
default=[],
help=(
"Use file as an allowlist. Can be passed multiple times to combine multiple "
"allowlists. Allowlists can be created with --generate-allowlist"
),
)
parser.add_argument(
"--generate-allowlist",
"--generate-whitelist",
action="store_true",
help="Print an allowlist (to stdout) to be used with --allowlist",
)
parser.add_argument(
"--ignore-unused-allowlist",
"--ignore-unused-whitelist",
action="store_true",
help="Ignore unused allowlist entries",
)
config_group = parser.add_argument_group(
title='mypy config file',
description="Use a config file instead of command line arguments. "
"Plugins and mypy path are the only supported "
"configurations.",
)
config_group.add_argument(
'--mypy-config-file',
help=(
"An existing mypy configuration file, currently used by stubtest to help "
"determine mypy path and plugins"
),
)
return parser.parse_args(args)
def main() -> int:
mypy.util.check_python_version("stubtest")
return test_stubs(parse_options(sys.argv[1:]))
if __name__ == "__main__":
sys.exit(main())
|
_resolve_funcitem_from_decorator
|
Returns a FuncItem that corresponds to the output of the decorator.
Returns None if we can't figure out what that would be. For convenience, this function also
accepts FuncItems.
|
"""Tests for stubs.
Verify that various things in stubs are consistent with how things behave at runtime.
"""
import argparse
import copy
import enum
import importlib
import inspect
import re
import sys
import types
import warnings
from functools import singledispatch
from pathlib import Path
from typing import Any, Dict, Generic, Iterator, List, Optional, Tuple, TypeVar, Union, cast
from typing_extensions import Type
import mypy.build
import mypy.errors
import mypy.modulefinder
import mypy.state
import mypy.subtypes
import mypy.typeops
import mypy.types
import mypy.util
from mypy import nodes
from mypy.config_parser import parse_config_file
from mypy.options import Options
from mypy.util import FancyFormatter
class Missing:
"""Marker object for things that are missing (from a stub or the runtime)."""
def __repr__(self) -> str:
return "MISSING"
MISSING = Missing()
T = TypeVar("T")
if sys.version_info >= (3, 5, 3):
MaybeMissing = Union[T, Missing]
else:
# work around a bug in 3.5.2 and earlier's typing.py
class MaybeMissingMeta(type):
def __getitem__(self, arg: Any) -> Any:
return Union[arg, Missing]
class MaybeMissing(metaclass=MaybeMissingMeta): # type: ignore
pass
_formatter = FancyFormatter(sys.stdout, sys.stderr, False)
def _style(message: str, **kwargs: Any) -> str:
"""Wrapper around mypy.util for fancy formatting."""
kwargs.setdefault("color", "none")
return _formatter.style(message, **kwargs)
class Error:
def __init__(
self,
object_path: List[str],
message: str,
stub_object: MaybeMissing[nodes.Node],
runtime_object: MaybeMissing[Any],
*,
stub_desc: Optional[str] = None,
runtime_desc: Optional[str] = None
) -> None:
"""Represents an error found by stubtest.
:param object_path: Location of the object with the error,
e.g. ``["module", "Class", "method"]``
:param message: Error message
:param stub_object: The mypy node representing the stub
:param runtime_object: Actual object obtained from the runtime
:param stub_desc: Specialised description for the stub object, should you wish
:param runtime_desc: Specialised description for the runtime object, should you wish
"""
self.object_desc = ".".join(object_path)
self.message = message
self.stub_object = stub_object
self.runtime_object = runtime_object
self.stub_desc = stub_desc or str(getattr(stub_object, "type", stub_object))
self.runtime_desc = runtime_desc or str(runtime_object)
def is_missing_stub(self) -> bool:
"""Whether or not the error is for something missing from the stub."""
return isinstance(self.stub_object, Missing)
def is_positional_only_related(self) -> bool:
"""Whether or not the error is for something being (or not being) positional-only."""
# TODO: This is hacky, use error codes or something more resilient
return "leading double underscore" in self.message
def get_description(self, concise: bool = False) -> str:
"""Returns a description of the error.
:param concise: Whether to return a concise, one-line description
"""
if concise:
return _style(self.object_desc, bold=True) + " " + self.message
stub_line = None
stub_file = None # type: None
if not isinstance(self.stub_object, Missing):
stub_line = self.stub_object.line
# TODO: Find a way of getting the stub file
stub_loc_str = ""
if stub_line:
stub_loc_str += " at line {}".format(stub_line)
if stub_file:
stub_loc_str += " in file {}".format(Path(stub_file))
runtime_line = None
runtime_file = None
if not isinstance(self.runtime_object, Missing):
try:
runtime_line = inspect.getsourcelines(self.runtime_object)[1]
except (OSError, TypeError):
pass
try:
runtime_file = inspect.getsourcefile(self.runtime_object)
except TypeError:
pass
runtime_loc_str = ""
if runtime_line:
runtime_loc_str += " at line {}".format(runtime_line)
if runtime_file:
runtime_loc_str += " in file {}".format(Path(runtime_file))
output = [
_style("error: ", color="red", bold=True),
_style(self.object_desc, bold=True),
" ",
self.message,
"\n",
"Stub:",
_style(stub_loc_str, dim=True),
"\n",
_style(self.stub_desc + "\n", color="blue", dim=True),
"Runtime:",
_style(runtime_loc_str, dim=True),
"\n",
_style(self.runtime_desc + "\n", color="blue", dim=True),
]
return "".join(output)
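# Editor's note: hypothetical construction of an Error to show how the fields above feed
# the report; MISSING stands in for the absent stub node and the names are placeholders.
def _example_error_description() -> str:
    err = Error(["mymodule", "some_function"], "is not present in stub", MISSING, print)
    # The concise form is roughly "mymodule.some_function is not present in stub"
    return err.get_description(concise=True)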
def test_module(module_name: str) -> Iterator[Error]:
"""Tests a given module's stub against introspecting it at runtime.
Requires the stub to have been built already, accomplished by a call to ``build_stubs``.
:param module_name: The module to test
"""
stub = get_stub(module_name)
if stub is None:
yield Error([module_name], "failed to find stubs", MISSING, None)
return
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
runtime = importlib.import_module(module_name)
except Exception as e:
yield Error([module_name], "failed to import: {}".format(e), stub, MISSING)
return
with warnings.catch_warnings():
warnings.simplefilter("ignore")
yield from verify(stub, runtime, [module_name])
@singledispatch
def verify(
stub: nodes.Node, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
"""Entry point for comparing a stub to a runtime object.
We use single dispatch based on the type of ``stub``.
:param stub: The mypy node representing a part of the stub
:param runtime: The runtime object corresponding to ``stub``
"""
yield Error(object_path, "is an unknown mypy node", stub, runtime)
@verify.register(nodes.MypyFile)
def verify_mypyfile(
stub: nodes.MypyFile, runtime: MaybeMissing[types.ModuleType], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if not isinstance(runtime, types.ModuleType):
yield Error(object_path, "is not a module", stub, runtime)
return
# Check things in the stub that are public
to_check = set(
m
for m, o in stub.names.items()
if o.module_public and (not m.startswith("_") or hasattr(runtime, m))
)
runtime_public_contents = [
m
for m in dir(runtime)
if not m.startswith("_")
# Ensure that the object's module is `runtime`, e.g. so that we don't pick up reexported
# modules and infinitely recurse. Unfortunately, there's no way to detect an explicit
# reexport missing from the stubs (that isn't specified in __all__)
and getattr(getattr(runtime, m), "__module__", None) == runtime.__name__
]
# Check all things declared in module's __all__, falling back to runtime_public_contents
to_check.update(getattr(runtime, "__all__", runtime_public_contents))
to_check.difference_update({"__file__", "__doc__", "__name__", "__builtins__", "__package__"})
for entry in sorted(to_check):
yield from verify(
stub.names[entry].node if entry in stub.names else MISSING,
getattr(runtime, entry, MISSING),
object_path + [entry],
)
@verify.register(nodes.TypeInfo)
def verify_typeinfo(
stub: nodes.TypeInfo, runtime: MaybeMissing[Type[Any]], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime, stub_desc=repr(stub))
return
if not isinstance(runtime, type):
yield Error(object_path, "is not a type", stub, runtime, stub_desc=repr(stub))
return
# Check everything already defined in the stub
to_check = set(stub.names)
# There's a reasonable case to be made that we should always check all dunders, but it's
# currently quite noisy. We could turn this into a denylist instead of an allowlist.
to_check.update(
# cast to workaround mypyc complaints
m for m in cast(Any, vars)(runtime) if not m.startswith("_") or m in SPECIAL_DUNDERS
)
for entry in sorted(to_check):
mangled_entry = entry
if entry.startswith("__") and not entry.endswith("__"):
mangled_entry = "_{}{}".format(stub.name, entry)
yield from verify(
next((t.names[entry].node for t in stub.mro if entry in t.names), MISSING),
getattr(runtime, mangled_entry, MISSING),
object_path + [entry],
)
def _verify_static_class_methods(
stub: nodes.FuncBase, runtime: Any, object_path: List[str]
) -> Iterator[str]:
if stub.name in ("__new__", "__init_subclass__", "__class_getitem__"):
# Special cased by Python, so don't bother checking
return
if inspect.isbuiltin(runtime):
# The isinstance checks don't work reliably for builtins, e.g. datetime.datetime.now, so do
# something a little hacky that seems to work well
probably_class_method = isinstance(getattr(runtime, "__self__", None), type)
if probably_class_method and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not probably_class_method and stub.is_class:
yield "stub is a classmethod but runtime is not"
return
# Look the object up statically, to avoid binding by the descriptor protocol
static_runtime = importlib.import_module(object_path[0])
for entry in object_path[1:]:
try:
static_runtime = inspect.getattr_static(static_runtime, entry)
except AttributeError:
# This can happen with mangled names, ignore for now.
# TODO: pass more information about ancestors of nodes/objects to verify, so we don't
# have to do this hacky lookup. Would be useful in a couple other places too.
return
if isinstance(static_runtime, classmethod) and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not isinstance(static_runtime, classmethod) and stub.is_class:
yield "stub is a classmethod but runtime is not"
if isinstance(static_runtime, staticmethod) and not stub.is_static:
yield "runtime is a staticmethod but stub is not"
if not isinstance(static_runtime, staticmethod) and stub.is_static:
yield "stub is a staticmethod but runtime is not"
def _verify_arg_name(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter, function_name: str
) -> Iterator[str]:
"""Checks whether argument names match."""
# Ignore exact names for most dunder methods
if is_dunder(function_name, exclude_special=True):
return
def strip_prefix(s: str, prefix: str) -> str:
return s[len(prefix):] if s.startswith(prefix) else s
if strip_prefix(stub_arg.variable.name, "__") == runtime_arg.name:
return
def names_approx_match(a: str, b: str) -> bool:
a = a.strip("_")
b = b.strip("_")
return a.startswith(b) or b.startswith(a) or len(a) == 1 or len(b) == 1
# Be more permissive about names matching for positional-only arguments
if runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY and names_approx_match(
stub_arg.variable.name, runtime_arg.name
):
return
# This comes up with namedtuples, so ignore
if stub_arg.variable.name == "_self":
return
yield (
'stub argument "{}" differs from runtime argument "{}"'.format(
stub_arg.variable.name, runtime_arg.name
)
)
def _verify_arg_default_value(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter
) -> Iterator[str]:
"""Checks whether argument default values are compatible."""
if runtime_arg.default != inspect.Parameter.empty:
if stub_arg.kind not in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'runtime argument "{}" has a default value but stub argument does not'.format(
runtime_arg.name
)
)
else:
runtime_type = get_mypy_type_of_runtime_value(runtime_arg.default)
# Fallback to the type annotation type if var type is missing. The type annotation
# is an UnboundType, but I don't know enough to know what the pros and cons here are.
# UnboundTypes have ugly question marks following them, so default to var type.
# Note we do this same fallback when constructing signatures in from_overloadedfuncdef
stub_type = stub_arg.variable.type or stub_arg.type_annotation
if isinstance(stub_type, mypy.types.TypeVarType):
stub_type = stub_type.upper_bound
if (
runtime_type is not None
and stub_type is not None
# Avoid false positives for marker objects
and type(runtime_arg.default) != object
and not is_subtype_helper(runtime_type, stub_type)
):
yield (
'runtime argument "{}" has a default value of type {}, '
"which is incompatible with stub argument type {}".format(
runtime_arg.name, runtime_type, stub_type
)
)
else:
if stub_arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'stub argument "{}" has a default value but runtime argument does not'.format(
stub_arg.variable.name
)
)
def maybe_strip_cls(name: str, args: List[nodes.Argument]) -> List[nodes.Argument]:
if name in ("__init_subclass__", "__class_getitem__"):
# These are implicitly classmethods. If the stub chooses not to have @classmethod, we
# should remove the cls argument
if args[0].variable.name == "cls":
return args[1:]
return args
class Signature(Generic[T]):
def __init__(self) -> None:
self.pos = [] # type: List[T]
self.kwonly = {} # type: Dict[str, T]
self.varpos = None # type: Optional[T]
self.varkw = None # type: Optional[T]
def __str__(self) -> str:
def get_name(arg: Any) -> str:
if isinstance(arg, inspect.Parameter):
return arg.name
if isinstance(arg, nodes.Argument):
return arg.variable.name
raise AssertionError
def get_type(arg: Any) -> Optional[str]:
if isinstance(arg, inspect.Parameter):
return None
if isinstance(arg, nodes.Argument):
return str(arg.variable.type or arg.type_annotation)
raise AssertionError
def has_default(arg: Any) -> bool:
if isinstance(arg, inspect.Parameter):
return arg.default != inspect.Parameter.empty
if isinstance(arg, nodes.Argument):
return arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT)
raise AssertionError
def get_desc(arg: Any) -> str:
arg_type = get_type(arg)
return (
get_name(arg)
+ (": {}".format(arg_type) if arg_type else "")
+ (" = ..." if has_default(arg) else "")
)
kw_only = sorted(self.kwonly.values(), key=lambda a: (has_default(a), get_name(a)))
ret = "def ("
ret += ", ".join(
[get_desc(arg) for arg in self.pos]
+ (["*" + get_name(self.varpos)] if self.varpos else (["*"] if self.kwonly else []))
+ [get_desc(arg) for arg in kw_only]
+ (["**" + get_name(self.varkw)] if self.varkw else [])
)
ret += ")"
return ret
@staticmethod
def from_funcitem(stub: nodes.FuncItem) -> "Signature[nodes.Argument]":
stub_sig = Signature() # type: Signature[nodes.Argument]
stub_args = maybe_strip_cls(stub.name, stub.arguments)
for stub_arg in stub_args:
if stub_arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
stub_sig.pos.append(stub_arg)
elif stub_arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
stub_sig.kwonly[stub_arg.variable.name] = stub_arg
elif stub_arg.kind == nodes.ARG_STAR:
stub_sig.varpos = stub_arg
elif stub_arg.kind == nodes.ARG_STAR2:
stub_sig.varkw = stub_arg
else:
raise AssertionError
return stub_sig
@staticmethod
def from_inspect_signature(signature: inspect.Signature) -> "Signature[inspect.Parameter]":
runtime_sig = Signature() # type: Signature[inspect.Parameter]
for runtime_arg in signature.parameters.values():
if runtime_arg.kind in (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
):
runtime_sig.pos.append(runtime_arg)
elif runtime_arg.kind == inspect.Parameter.KEYWORD_ONLY:
runtime_sig.kwonly[runtime_arg.name] = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_POSITIONAL:
runtime_sig.varpos = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_KEYWORD:
runtime_sig.varkw = runtime_arg
else:
raise AssertionError
return runtime_sig
@staticmethod
def from_overloadedfuncdef(stub: nodes.OverloadedFuncDef) -> "Signature[nodes.Argument]":
"""Returns a Signature from an OverloadedFuncDef.
If life were simple, to verify_overloadedfuncdef, we'd just verify_funcitem for each of its
items. Unfortunately, life isn't simple and overloads are pretty deceitful. So instead, we
try and combine the overload's items into a single signature that is compatible with any
lies it might try to tell.
"""
# For most dunder methods, just assume all args are positional-only
assume_positional_only = is_dunder(stub.name, exclude_special=True)
all_args = {} # type: Dict[str, List[Tuple[nodes.Argument, int]]]
for func in map(_resolve_funcitem_from_decorator, stub.items):
assert func is not None
args = maybe_strip_cls(stub.name, func.arguments)
for index, arg in enumerate(args):
# For positional-only args, we allow overloads to have different names for the same
# argument. To accomplish this, we just make up a fake index-based name.
name = (
"__{}".format(index)
if arg.variable.name.startswith("__") or assume_positional_only
else arg.variable.name
)
all_args.setdefault(name, []).append((arg, index))
def get_position(arg_name: str) -> int:
# We just need this to return the positional args in the correct order.
return max(index for _, index in all_args[arg_name])
def get_type(arg_name: str) -> mypy.types.ProperType:
with mypy.state.strict_optional_set(True):
all_types = [
arg.variable.type or arg.type_annotation for arg, _ in all_args[arg_name]
]
return mypy.typeops.make_simplified_union([t for t in all_types if t])
def get_kind(arg_name: str) -> int:
kinds = {arg.kind for arg, _ in all_args[arg_name]}
if nodes.ARG_STAR in kinds:
return nodes.ARG_STAR
if nodes.ARG_STAR2 in kinds:
return nodes.ARG_STAR2
# The logic here is based on two tenets:
# 1) If an arg is ever optional (or unspecified), it is optional
# 2) If an arg is ever positional, it is positional
is_opt = (
len(all_args[arg_name]) < len(stub.items)
or nodes.ARG_OPT in kinds
or nodes.ARG_NAMED_OPT in kinds
)
is_pos = nodes.ARG_OPT in kinds or nodes.ARG_POS in kinds
if is_opt:
return nodes.ARG_OPT if is_pos else nodes.ARG_NAMED_OPT
return nodes.ARG_POS if is_pos else nodes.ARG_NAMED
sig = Signature() # type: Signature[nodes.Argument]
for arg_name in sorted(all_args, key=get_position):
# example_arg_name gives us a real name (in case we had a fake index-based name)
example_arg_name = all_args[arg_name][0][0].variable.name
arg = nodes.Argument(
nodes.Var(example_arg_name, get_type(arg_name)),
type_annotation=None,
initializer=None,
kind=get_kind(arg_name),
)
if arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
sig.pos.append(arg)
elif arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
sig.kwonly[arg.variable.name] = arg
elif arg.kind == nodes.ARG_STAR:
sig.varpos = arg
elif arg.kind == nodes.ARG_STAR2:
sig.varkw = arg
else:
raise AssertionError
return sig
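# Editor's note: a worked example of the combination rules above (illustrative only).
# Given stub overloads
#     @overload
#     def f(x: int) -> int: ...
#     @overload
#     def f(x: str, y: str = ...) -> str: ...
# from_overloadedfuncdef produces roughly `def (x: Union[int, str], y: str = ...)`:
# `x` appears in every item and is always positional, so it stays positional with a union
# type, while `y` is missing from one item, so tenet 1 makes it optional (ARG_OPT).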
def _verify_signature(
stub: Signature[nodes.Argument], runtime: Signature[inspect.Parameter], function_name: str
) -> Iterator[str]:
# Check positional arguments match up
for stub_arg, runtime_arg in zip(stub.pos, runtime.pos):
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
if (
runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY
and not stub_arg.variable.name.startswith("__")
and not stub_arg.variable.name.strip("_") == "self"
and not is_dunder(function_name, exclude_special=True) # noisy for dunder methods
):
yield (
'stub argument "{}" should be positional-only '
'(rename with a leading double underscore, i.e. "__{}")'.format(
stub_arg.variable.name, runtime_arg.name
)
)
if (
runtime_arg.kind != inspect.Parameter.POSITIONAL_ONLY
and stub_arg.variable.name.startswith("__")
):
yield (
'stub argument "{}" should be positional or keyword '
"(remove leading double underscore)".format(stub_arg.variable.name)
)
# Check unmatched positional args
if len(stub.pos) > len(runtime.pos):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *args. Hence, a) we can't check that the runtime actually takes those
# parameters and b) below, we don't enforce that the stub takes *args, since runtime logic
# may prevent those arguments from actually being accepted.
if runtime.varpos is None:
for stub_arg in stub.pos[len(runtime.pos):]:
# If the variable is in runtime.kwonly, it's just mislabelled as not a
# keyword-only argument
if stub_arg.variable.name not in runtime.kwonly:
yield 'runtime does not have argument "{}"'.format(stub_arg.variable.name)
else:
yield 'stub argument "{}" is not keyword-only'.format(stub_arg.variable.name)
if stub.varpos is not None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
elif len(stub.pos) < len(runtime.pos):
for runtime_arg in runtime.pos[len(stub.pos):]:
if runtime_arg.name not in stub.kwonly:
yield 'stub does not have argument "{}"'.format(runtime_arg.name)
else:
yield 'runtime argument "{}" is not keyword-only'.format(runtime_arg.name)
# Checks involving *args
if len(stub.pos) <= len(runtime.pos) or runtime.varpos is None:
if stub.varpos is None and runtime.varpos is not None:
yield 'stub does not have *args argument "{}"'.format(runtime.varpos.name)
if stub.varpos is not None and runtime.varpos is None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
# Check keyword-only args
for arg in sorted(set(stub.kwonly) & set(runtime.kwonly)):
stub_arg, runtime_arg = stub.kwonly[arg], runtime.kwonly[arg]
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
# Check unmatched keyword-only args
if runtime.varkw is None or not set(runtime.kwonly).issubset(set(stub.kwonly)):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *kwargs. Hence, a) we only check if the runtime actually takes those
# parameters when the above condition holds and b) below, we don't enforce that the stub
# takes *kwargs, since runtime logic may prevent additional arguments from actually being
# accepted.
for arg in sorted(set(stub.kwonly) - set(runtime.kwonly)):
yield 'runtime does not have argument "{}"'.format(arg)
for arg in sorted(set(runtime.kwonly) - set(stub.kwonly)):
if arg in set(stub_arg.variable.name for stub_arg in stub.pos):
# Don't report this if we've reported it before
if len(stub.pos) > len(runtime.pos) and runtime.varpos is not None:
yield 'stub argument "{}" is not keyword-only'.format(arg)
else:
yield 'stub does not have argument "{}"'.format(arg)
# Checks involving **kwargs
if stub.varkw is None and runtime.varkw is not None:
# As mentioned above, don't enforce that the stub takes **kwargs.
# Also check against positional parameters, to avoid a nitpicky message when an argument
# isn't marked as keyword-only
stub_pos_names = set(stub_arg.variable.name for stub_arg in stub.pos)
# Ideally we'd do a strict subset check, but in practice the errors from that aren't useful
if not set(runtime.kwonly).issubset(set(stub.kwonly) | stub_pos_names):
yield 'stub does not have **kwargs argument "{}"'.format(runtime.varkw.name)
if stub.varkw is not None and runtime.varkw is None:
yield 'runtime does not have **kwargs argument "{}"'.format(stub.varkw.variable.name)
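# Editor's note: illustrative sketch (helper and functions are hypothetical). For a stub
# declaring `def f(x: int) -> None` checked against a runtime `def f(x, y)`, the checks
# above yield messages like 'stub does not have argument "y"'. The runtime half of such a
# comparison is built straight from inspect:
def _example_runtime_half_of_comparison() -> "Signature[inspect.Parameter]":
    def f(x, y):  # hypothetical runtime implementation
        return (x, y)
    return Signature.from_inspect_signature(inspect.signature(f))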
@verify.register(nodes.FuncItem)
def verify_funcitem(
stub: nodes.FuncItem, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except (ValueError, RuntimeError):
# inspect.signature throws sometimes
# catch RuntimeError because of https://bugs.python.org/issue39504
return
stub_sig = Signature.from_funcitem(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
runtime_desc="def " + str(signature),
)
@verify.register(Missing)
def verify_none(
stub: Missing, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
yield Error(object_path, "is not present in stub", stub, runtime)
@verify.register(nodes.Var)
def verify_var(
stub: nodes.Var, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
# Don't always yield an error here, because we often can't find instance variables
if len(object_path) <= 2:
yield Error(object_path, "is not present at runtime", stub, runtime)
return
runtime_type = get_mypy_type_of_runtime_value(runtime)
if (
runtime_type is not None
and stub.type is not None
and not is_subtype_helper(runtime_type, stub.type)
):
should_error = True
# Avoid errors when defining enums, since runtime_type is the enum itself, but we'd
# annotate it with the type of runtime.value
if isinstance(runtime, enum.Enum):
runtime_type = get_mypy_type_of_runtime_value(runtime.value)
if runtime_type is not None and is_subtype_helper(runtime_type, stub.type):
should_error = False
if should_error:
yield Error(
object_path,
"variable differs from runtime type {}".format(runtime_type),
stub,
runtime,
)
@verify.register(nodes.OverloadedFuncDef)
def verify_overloadedfuncdef(
stub: nodes.OverloadedFuncDef, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.is_property:
# We get here in cases of overloads from property.setter
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except ValueError:
return
stub_sig = Signature.from_overloadedfuncdef(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
# TODO: This is a little hacky, but the addition here is super useful
if "has a default value of type" in message:
message += (
". This is often caused by overloads failing to account for explicitly passing "
"in the default value."
)
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
stub_desc=str(stub.type) + "\nInferred signature: {}".format(stub_sig),
runtime_desc="def " + str(signature),
)
@verify.register(nodes.TypeVarExpr)
def verify_typevarexpr(
stub: nodes.TypeVarExpr, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
    # At runtime this yields nothing; the unreachable `yield` just makes the function a
    # generator, matching the Iterator[Error] signature expected by verify().
    if False:
        yield None
def _verify_property(stub: nodes.Decorator, runtime: Any) -> Iterator[str]:
assert stub.func.is_property
if isinstance(runtime, property):
return
if inspect.isdatadescriptor(runtime):
# It's enough like a property...
return
# Sometimes attributes pretend to be properties, for instance, to express that they
# are read only. So allowlist if runtime_type matches the return type of stub.
runtime_type = get_mypy_type_of_runtime_value(runtime)
func_type = (
stub.func.type.ret_type if isinstance(stub.func.type, mypy.types.CallableType) else None
)
if (
runtime_type is not None
and func_type is not None
and is_subtype_helper(runtime_type, func_type)
):
return
yield "is inconsistent, cannot reconcile @property on stub with runtime object"
# MASKED: _resolve_funcitem_from_decorator function (lines 806-848)
@verify.register(nodes.Decorator)
def verify_decorator(
stub: nodes.Decorator, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.func.is_property:
for message in _verify_property(stub, runtime):
yield Error(object_path, message, stub, runtime)
return
func = _resolve_funcitem_from_decorator(stub)
if func is not None:
yield from verify(func, runtime, object_path)
@verify.register(nodes.TypeAlias)
def verify_typealias(
stub: nodes.TypeAlias, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if False:
yield None
SPECIAL_DUNDERS = ("__init__", "__new__", "__call__", "__init_subclass__", "__class_getitem__")
def is_dunder(name: str, exclude_special: bool = False) -> bool:
"""Returns whether name is a dunder name.
:param exclude_special: Whether to return False for a couple special dunder methods.
"""
if exclude_special and name in SPECIAL_DUNDERS:
return False
return name.startswith("__") and name.endswith("__")
def is_subtype_helper(left: mypy.types.Type, right: mypy.types.Type) -> bool:
"""Checks whether ``left`` is a subtype of ``right``."""
left = mypy.types.get_proper_type(left)
right = mypy.types.get_proper_type(right)
if (
isinstance(left, mypy.types.LiteralType)
and isinstance(left.value, int)
and left.value in (0, 1)
and isinstance(right, mypy.types.Instance)
and right.type.fullname == "builtins.bool"
):
# Pretend Literal[0, 1] is a subtype of bool to avoid unhelpful errors.
return True
with mypy.state.strict_optional_set(True):
return mypy.subtypes.is_subtype(left, right)
def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]:
"""Returns a mypy type object representing the type of ``runtime``.
Returns None if we can't find something that works.
"""
if runtime is None:
return mypy.types.NoneType()
if isinstance(runtime, property):
# Give up on properties to avoid issues with things that are typed as attributes.
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(
runtime,
(types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
):
builtins = get_stub("builtins")
assert builtins is not None
type_info = builtins.names["function"].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
try:
signature = inspect.signature(runtime)
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append(
None if arg.kind == inspect.Parameter.POSITIONAL_ONLY else arg.name
)
                # True when the parameter carries a real default (inspect uses `empty` as a sentinel)
                has_default = arg.default != inspect.Parameter.empty
                if arg.kind == inspect.Parameter.POSITIONAL_ONLY:
                    arg_kinds.append(nodes.ARG_OPT if has_default else nodes.ARG_POS)
                elif arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
                    arg_kinds.append(nodes.ARG_OPT if has_default else nodes.ARG_POS)
                elif arg.kind == inspect.Parameter.KEYWORD_ONLY:
                    arg_kinds.append(nodes.ARG_NAMED_OPT if has_default else nodes.ARG_NAMED)
elif arg.kind == inspect.Parameter.VAR_POSITIONAL:
arg_kinds.append(nodes.ARG_STAR)
elif arg.kind == inspect.Parameter.VAR_KEYWORD:
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
except ValueError:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(
arg_types,
arg_kinds,
arg_names,
ret_type=anytype(),
fallback=fallback,
is_ellipsis_args=True,
)
# Try and look up a stub for the runtime object
stub = get_stub(type(runtime).__module__)
if stub is None:
return None
type_name = type(runtime).__name__
if type_name not in stub.names:
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if not isinstance(type_info, nodes.TypeInfo):
return None
if isinstance(runtime, tuple):
# Special case tuples so we construct a valid mypy.types.TupleType
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if i is not None else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
try:
# Literals are supposed to be only bool, int, str, bytes or enums, but this seems to work
# well (when not using mypyc, for which bytes and enums are also problematic).
return mypy.types.LiteralType(
value=runtime,
fallback=fallback,
)
except TypeError:
# Ask for forgiveness if we're using mypyc.
return fallback
_all_stubs = {} # type: Dict[str, nodes.MypyFile]
def build_stubs(modules: List[str], options: Options, find_submodules: bool = False) -> List[str]:
"""Uses mypy to construct stub objects for the given modules.
This sets global state that ``get_stub`` can access.
Returns all modules we might want to check. If ``find_submodules`` is False, this is equal
to ``modules``.
:param modules: List of modules to build stubs for.
:param options: Mypy options for finding and building stubs.
:param find_submodules: Whether to attempt to find submodules of the given modules as well.
"""
data_dir = mypy.build.default_data_dir()
search_path = mypy.modulefinder.compute_search_paths([], options, data_dir)
find_module_cache = mypy.modulefinder.FindModuleCache(
search_path, fscache=None, options=options
)
all_modules = []
sources = []
for module in modules:
all_modules.append(module)
if not find_submodules:
module_path = find_module_cache.find_module(module)
if not isinstance(module_path, str):
# test_module will yield an error later when it can't find stubs
continue
sources.append(mypy.modulefinder.BuildSource(module_path, module, None))
else:
found_sources = find_module_cache.find_modules_recursive(module)
sources.extend(found_sources)
all_modules.extend(s.module for s in found_sources if s.module not in all_modules)
try:
res = mypy.build.build(sources=sources, options=options)
except mypy.errors.CompileError as e:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to failed mypy compile:\n",
str(e),
]
print("".join(output))
raise RuntimeError from e
if res.errors:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to mypy build errors:\n",
]
print("".join(output) + "\n".join(res.errors))
raise RuntimeError
global _all_stubs
_all_stubs = res.files
return all_modules
def get_stub(module: str) -> Optional[nodes.MypyFile]:
"""Returns a stub object for the given module, if we've built one."""
return _all_stubs.get(module)
def get_typeshed_stdlib_modules(custom_typeshed_dir: Optional[str]) -> List[str]:
"""Returns a list of stdlib modules in typeshed (for current Python version)."""
stdlib_py_versions = mypy.modulefinder.load_stdlib_py_versions(custom_typeshed_dir)
packages = set()
# Typeshed doesn't cover Python 3.5.
if sys.version_info < (3, 6):
version_info = (3, 6)
else:
version_info = sys.version_info[0:2]
for module, versions in stdlib_py_versions.items():
minver, maxver = versions
if version_info >= minver and (maxver is None or version_info <= maxver):
packages.add(module)
if custom_typeshed_dir:
typeshed_dir = Path(custom_typeshed_dir)
else:
typeshed_dir = Path(mypy.build.default_data_dir()) / "typeshed"
stdlib_dir = typeshed_dir / "stdlib"
modules = []
for path in stdlib_dir.rglob("*.pyi"):
if path.stem == "__init__":
path = path.parent
module = ".".join(path.relative_to(stdlib_dir).parts[:-1] + (path.stem,))
if module.split(".")[0] in packages:
modules.append(module)
return sorted(modules)
def get_allowlist_entries(allowlist_file: str) -> Iterator[str]:
def strip_comments(s: str) -> str:
try:
return s[: s.index("#")].strip()
except ValueError:
return s.strip()
with open(allowlist_file) as f:
for line in f.readlines():
entry = strip_comments(line)
if entry:
yield entry
def test_stubs(args: argparse.Namespace, use_builtins_fixtures: bool = False) -> int:
"""This is stubtest! It's time to test the stubs!"""
# Load the allowlist. This is a series of strings corresponding to Error.object_desc
# Values in the dict will store whether we used the allowlist entry or not.
allowlist = {
entry: False
for allowlist_file in args.allowlist
for entry in get_allowlist_entries(allowlist_file)
}
allowlist_regexes = {entry: re.compile(entry) for entry in allowlist}
# If we need to generate an allowlist, we store Error.object_desc for each error here.
generated_allowlist = set()
modules = args.modules
if args.check_typeshed:
assert not args.modules, "Cannot pass both --check-typeshed and a list of modules"
modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir)
annoying_modules = {"antigravity", "this"}
modules = [m for m in modules if m not in annoying_modules]
assert modules, "No modules to check"
options = Options()
options.incremental = False
options.custom_typeshed_dir = args.custom_typeshed_dir
options.config_file = args.mypy_config_file
options.use_builtins_fixtures = use_builtins_fixtures
if options.config_file:
def set_strict_flags() -> None: # not needed yet
return
parse_config_file(options, set_strict_flags, options.config_file, sys.stdout, sys.stderr)
try:
modules = build_stubs(modules, options, find_submodules=not args.check_typeshed)
except RuntimeError:
return 1
exit_code = 0
for module in modules:
for error in test_module(module):
# Filter errors
if args.ignore_missing_stub and error.is_missing_stub():
continue
if args.ignore_positional_only and error.is_positional_only_related():
continue
if error.object_desc in allowlist:
allowlist[error.object_desc] = True
continue
is_allowlisted = False
for w in allowlist:
if allowlist_regexes[w].fullmatch(error.object_desc):
allowlist[w] = True
is_allowlisted = True
break
if is_allowlisted:
continue
# We have errors, so change exit code, and output whatever necessary
exit_code = 1
if args.generate_allowlist:
generated_allowlist.add(error.object_desc)
continue
print(error.get_description(concise=args.concise))
# Print unused allowlist entries
if not args.ignore_unused_allowlist:
for w in allowlist:
# Don't consider an entry unused if it regex-matches the empty string
# This lets us allowlist errors that don't manifest at all on some systems
if not allowlist[w] and not allowlist_regexes[w].fullmatch(""):
exit_code = 1
print("note: unused allowlist entry {}".format(w))
# Print the generated allowlist
if args.generate_allowlist:
for e in sorted(generated_allowlist):
print(e)
exit_code = 0
return exit_code
def parse_options(args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Compares stubs to objects introspected from the runtime."
)
parser.add_argument("modules", nargs="*", help="Modules to test")
parser.add_argument("--concise", action="store_true", help="Make output concise")
parser.add_argument(
"--ignore-missing-stub",
action="store_true",
help="Ignore errors for stub missing things that are present at runtime",
)
parser.add_argument(
"--ignore-positional-only",
action="store_true",
help="Ignore errors for whether an argument should or shouldn't be positional-only",
)
parser.add_argument(
"--custom-typeshed-dir", metavar="DIR", help="Use the custom typeshed in DIR"
)
parser.add_argument(
"--check-typeshed", action="store_true", help="Check all stdlib modules in typeshed"
)
parser.add_argument(
"--allowlist",
"--whitelist",
action="append",
metavar="FILE",
default=[],
help=(
"Use file as an allowlist. Can be passed multiple times to combine multiple "
"allowlists. Allowlists can be created with --generate-allowlist"
),
)
parser.add_argument(
"--generate-allowlist",
"--generate-whitelist",
action="store_true",
help="Print an allowlist (to stdout) to be used with --allowlist",
)
parser.add_argument(
"--ignore-unused-allowlist",
"--ignore-unused-whitelist",
action="store_true",
help="Ignore unused allowlist entries",
)
config_group = parser.add_argument_group(
title='mypy config file',
description="Use a config file instead of command line arguments. "
"Plugins and mypy path are the only supported "
"configurations.",
)
config_group.add_argument(
'--mypy-config-file',
help=(
"An existing mypy configuration file, currently used by stubtest to help "
"determine mypy path and plugins"
),
)
return parser.parse_args(args)
def main() -> int:
mypy.util.check_python_version("stubtest")
return test_stubs(parse_options(sys.argv[1:]))
if __name__ == "__main__":
sys.exit(main())
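# Editor's note: typical command-line invocations (package names are placeholders; the
# flags match parse_options above):
#     python -m mypy.stubtest some_package --allowlist allowlist.txt
#     python -m mypy.stubtest --check-typeshed --generate-allowlist > allowlist.txt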
|
def _resolve_funcitem_from_decorator(dec: nodes.OverloadPart) -> Optional[nodes.FuncItem]:
"""Returns a FuncItem that corresponds to the output of the decorator.
Returns None if we can't figure out what that would be. For convenience, this function also
accepts FuncItems.
"""
if isinstance(dec, nodes.FuncItem):
return dec
if dec.func.is_property:
return None
def apply_decorator_to_funcitem(
decorator: nodes.Expression, func: nodes.FuncItem
) -> Optional[nodes.FuncItem]:
if not isinstance(decorator, nodes.RefExpr):
return None
if decorator.fullname is None:
# Happens with namedtuple
return None
if decorator.fullname in (
"builtins.staticmethod",
"typing.overload",
"abc.abstractmethod",
):
return func
if decorator.fullname == "builtins.classmethod":
assert func.arguments[0].variable.name in ("cls", "metacls")
ret = copy.copy(func)
# Remove the cls argument, since it's not present in inspect.signature of classmethods
ret.arguments = ret.arguments[1:]
return ret
# Just give up on any other decorators. After excluding properties, we don't run into
# anything else when running on typeshed's stdlib.
return None
func = dec.func # type: nodes.FuncItem
for decorator in dec.original_decorators:
resulting_func = apply_decorator_to_funcitem(decorator, func)
if resulting_func is None:
return None
func = resulting_func
return func
| 806 | 848 |
"""Tests for stubs.
Verify that various things in stubs are consistent with how things behave at runtime.
"""
import argparse
import copy
import enum
import importlib
import inspect
import re
import sys
import types
import warnings
from functools import singledispatch
from pathlib import Path
from typing import Any, Dict, Generic, Iterator, List, Optional, Tuple, TypeVar, Union, cast
from typing_extensions import Type
import mypy.build
import mypy.errors
import mypy.modulefinder
import mypy.state
import mypy.subtypes
import mypy.typeops
import mypy.types
import mypy.util
from mypy import nodes
from mypy.config_parser import parse_config_file
from mypy.options import Options
from mypy.util import FancyFormatter
class Missing:
"""Marker object for things that are missing (from a stub or the runtime)."""
def __repr__(self) -> str:
return "MISSING"
MISSING = Missing()
T = TypeVar("T")
if sys.version_info >= (3, 5, 3):
MaybeMissing = Union[T, Missing]
else:
# work around a bug in 3.5.2 and earlier's typing.py
class MaybeMissingMeta(type):
def __getitem__(self, arg: Any) -> Any:
return Union[arg, Missing]
class MaybeMissing(metaclass=MaybeMissingMeta): # type: ignore
pass
_formatter = FancyFormatter(sys.stdout, sys.stderr, False)
def _style(message: str, **kwargs: Any) -> str:
"""Wrapper around mypy.util for fancy formatting."""
kwargs.setdefault("color", "none")
return _formatter.style(message, **kwargs)
class Error:
def __init__(
self,
object_path: List[str],
message: str,
stub_object: MaybeMissing[nodes.Node],
runtime_object: MaybeMissing[Any],
*,
stub_desc: Optional[str] = None,
runtime_desc: Optional[str] = None
) -> None:
"""Represents an error found by stubtest.
:param object_path: Location of the object with the error,
e.g. ``["module", "Class", "method"]``
:param message: Error message
:param stub_object: The mypy node representing the stub
:param runtime_object: Actual object obtained from the runtime
:param stub_desc: Specialised description for the stub object, should you wish
:param runtime_desc: Specialised description for the runtime object, should you wish
"""
self.object_desc = ".".join(object_path)
self.message = message
self.stub_object = stub_object
self.runtime_object = runtime_object
self.stub_desc = stub_desc or str(getattr(stub_object, "type", stub_object))
self.runtime_desc = runtime_desc or str(runtime_object)
def is_missing_stub(self) -> bool:
"""Whether or not the error is for something missing from the stub."""
return isinstance(self.stub_object, Missing)
def is_positional_only_related(self) -> bool:
"""Whether or not the error is for something being (or not being) positional-only."""
# TODO: This is hacky, use error codes or something more resilient
return "leading double underscore" in self.message
def get_description(self, concise: bool = False) -> str:
"""Returns a description of the error.
:param concise: Whether to return a concise, one-line description
"""
if concise:
return _style(self.object_desc, bold=True) + " " + self.message
stub_line = None
stub_file = None # type: None
if not isinstance(self.stub_object, Missing):
stub_line = self.stub_object.line
# TODO: Find a way of getting the stub file
stub_loc_str = ""
if stub_line:
stub_loc_str += " at line {}".format(stub_line)
if stub_file:
stub_loc_str += " in file {}".format(Path(stub_file))
runtime_line = None
runtime_file = None
if not isinstance(self.runtime_object, Missing):
try:
runtime_line = inspect.getsourcelines(self.runtime_object)[1]
except (OSError, TypeError):
pass
try:
runtime_file = inspect.getsourcefile(self.runtime_object)
except TypeError:
pass
runtime_loc_str = ""
if runtime_line:
runtime_loc_str += " at line {}".format(runtime_line)
if runtime_file:
runtime_loc_str += " in file {}".format(Path(runtime_file))
output = [
_style("error: ", color="red", bold=True),
_style(self.object_desc, bold=True),
" ",
self.message,
"\n",
"Stub:",
_style(stub_loc_str, dim=True),
"\n",
_style(self.stub_desc + "\n", color="blue", dim=True),
"Runtime:",
_style(runtime_loc_str, dim=True),
"\n",
_style(self.runtime_desc + "\n", color="blue", dim=True),
]
return "".join(output)
def test_module(module_name: str) -> Iterator[Error]:
"""Tests a given module's stub against introspecting it at runtime.
Requires the stub to have been built already, accomplished by a call to ``build_stubs``.
:param module_name: The module to test
"""
stub = get_stub(module_name)
if stub is None:
yield Error([module_name], "failed to find stubs", MISSING, None)
return
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
runtime = importlib.import_module(module_name)
except Exception as e:
yield Error([module_name], "failed to import: {}".format(e), stub, MISSING)
return
with warnings.catch_warnings():
warnings.simplefilter("ignore")
yield from verify(stub, runtime, [module_name])
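# Illustrative sketch (editor addition, not part of stubtest): test_module only
# works after build_stubs() has populated the module-level stub cache, so a
# minimal end-to-end run looks roughly like this. "textwrap" is an arbitrary
# example module; this assumes typeshed stubs are available.
def _example_test_single_module(module_name: str = "textwrap") -> List[str]:
    options = Options()
    options.incremental = False
    build_stubs([module_name], options)
    return [error.get_description(concise=True) for error in test_module(module_name)]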
@singledispatch
def verify(
stub: nodes.Node, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
"""Entry point for comparing a stub to a runtime object.
We use single dispatch based on the type of ``stub``.
:param stub: The mypy node representing a part of the stub
:param runtime: The runtime object corresponding to ``stub``
"""
yield Error(object_path, "is an unknown mypy node", stub, runtime)
@verify.register(nodes.MypyFile)
def verify_mypyfile(
stub: nodes.MypyFile, runtime: MaybeMissing[types.ModuleType], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if not isinstance(runtime, types.ModuleType):
yield Error(object_path, "is not a module", stub, runtime)
return
# Check things in the stub that are public
to_check = set(
m
for m, o in stub.names.items()
if o.module_public and (not m.startswith("_") or hasattr(runtime, m))
)
runtime_public_contents = [
m
for m in dir(runtime)
if not m.startswith("_")
# Ensure that the object's module is `runtime`, e.g. so that we don't pick up reexported
# modules and infinitely recurse. Unfortunately, there's no way to detect an explicit
# reexport missing from the stubs (that isn't specified in __all__)
and getattr(getattr(runtime, m), "__module__", None) == runtime.__name__
]
# Check all things declared in module's __all__, falling back to runtime_public_contents
to_check.update(getattr(runtime, "__all__", runtime_public_contents))
to_check.difference_update({"__file__", "__doc__", "__name__", "__builtins__", "__package__"})
for entry in sorted(to_check):
yield from verify(
stub.names[entry].node if entry in stub.names else MISSING,
getattr(runtime, entry, MISSING),
object_path + [entry],
)
@verify.register(nodes.TypeInfo)
def verify_typeinfo(
stub: nodes.TypeInfo, runtime: MaybeMissing[Type[Any]], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime, stub_desc=repr(stub))
return
if not isinstance(runtime, type):
yield Error(object_path, "is not a type", stub, runtime, stub_desc=repr(stub))
return
# Check everything already defined in the stub
to_check = set(stub.names)
# There's a reasonable case to be made that we should always check all dunders, but it's
# currently quite noisy. We could turn this into a denylist instead of an allowlist.
to_check.update(
# cast to workaround mypyc complaints
m for m in cast(Any, vars)(runtime) if not m.startswith("_") or m in SPECIAL_DUNDERS
)
for entry in sorted(to_check):
mangled_entry = entry
if entry.startswith("__") and not entry.endswith("__"):
mangled_entry = "_{}{}".format(stub.name, entry)
yield from verify(
next((t.names[entry].node for t in stub.mro if entry in t.names), MISSING),
getattr(runtime, mangled_entry, MISSING),
object_path + [entry],
)
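# Illustrative sketch (editor addition, not part of stubtest): the mangling
# above mirrors Python's private-name mangling, where __secret defined inside
# class Foo is stored as _Foo__secret at runtime.
def _example_name_mangling() -> bool:
    class Foo:
        __secret = 1  # stored on the class as _Foo__secret
    entry = "__secret"
    mangled_entry = "_{}{}".format("Foo", entry)
    return getattr(Foo, mangled_entry) == 1  # True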
def _verify_static_class_methods(
stub: nodes.FuncBase, runtime: Any, object_path: List[str]
) -> Iterator[str]:
if stub.name in ("__new__", "__init_subclass__", "__class_getitem__"):
# Special cased by Python, so don't bother checking
return
if inspect.isbuiltin(runtime):
# The isinstance checks don't work reliably for builtins, e.g. datetime.datetime.now, so do
# something a little hacky that seems to work well
probably_class_method = isinstance(getattr(runtime, "__self__", None), type)
if probably_class_method and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not probably_class_method and stub.is_class:
yield "stub is a classmethod but runtime is not"
return
# Look the object up statically, to avoid binding by the descriptor protocol
static_runtime = importlib.import_module(object_path[0])
for entry in object_path[1:]:
try:
static_runtime = inspect.getattr_static(static_runtime, entry)
except AttributeError:
# This can happen with mangled names, ignore for now.
# TODO: pass more information about ancestors of nodes/objects to verify, so we don't
# have to do this hacky lookup. Would be useful in a couple other places too.
return
if isinstance(static_runtime, classmethod) and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not isinstance(static_runtime, classmethod) and stub.is_class:
yield "stub is a classmethod but runtime is not"
if isinstance(static_runtime, staticmethod) and not stub.is_static:
yield "runtime is a staticmethod but stub is not"
if not isinstance(static_runtime, staticmethod) and stub.is_static:
yield "stub is a staticmethod but runtime is not"
def _verify_arg_name(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter, function_name: str
) -> Iterator[str]:
"""Checks whether argument names match."""
# Ignore exact names for most dunder methods
if is_dunder(function_name, exclude_special=True):
return
def strip_prefix(s: str, prefix: str) -> str:
return s[len(prefix):] if s.startswith(prefix) else s
if strip_prefix(stub_arg.variable.name, "__") == runtime_arg.name:
return
def names_approx_match(a: str, b: str) -> bool:
a = a.strip("_")
b = b.strip("_")
return a.startswith(b) or b.startswith(a) or len(a) == 1 or len(b) == 1
# Be more permissive about names matching for positional-only arguments
if runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY and names_approx_match(
stub_arg.variable.name, runtime_arg.name
):
return
# This comes up with namedtuples, so ignore
if stub_arg.variable.name == "_self":
return
yield (
'stub argument "{}" differs from runtime argument "{}"'.format(
stub_arg.variable.name, runtime_arg.name
)
)
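# Illustrative sketch (editor addition, not part of stubtest): in stubs, a
# leading double underscore (e.g. "__x") marks a parameter as positional-only,
# so the stub name "__x" is allowed to match a runtime parameter named "x".
# A standalone restatement of the core comparison:
def _example_stub_name_matches_runtime(stub_name: str, runtime_name: str) -> bool:
    stripped = stub_name[2:] if stub_name.startswith("__") else stub_name
    return stripped == runtime_name  # _example_stub_name_matches_runtime("__x", "x") -> True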
def _verify_arg_default_value(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter
) -> Iterator[str]:
"""Checks whether argument default values are compatible."""
if runtime_arg.default != inspect.Parameter.empty:
if stub_arg.kind not in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'runtime argument "{}" has a default value but stub argument does not'.format(
runtime_arg.name
)
)
else:
runtime_type = get_mypy_type_of_runtime_value(runtime_arg.default)
# Fallback to the type annotation type if var type is missing. The type annotation
# is an UnboundType, but I don't know enough to know what the pros and cons here are.
# UnboundTypes have ugly question marks following them, so default to var type.
# Note we do this same fallback when constructing signatures in from_overloadedfuncdef
stub_type = stub_arg.variable.type or stub_arg.type_annotation
if isinstance(stub_type, mypy.types.TypeVarType):
stub_type = stub_type.upper_bound
if (
runtime_type is not None
and stub_type is not None
# Avoid false positives for marker objects
and type(runtime_arg.default) != object
and not is_subtype_helper(runtime_type, stub_type)
):
yield (
'runtime argument "{}" has a default value of type {}, '
"which is incompatible with stub argument type {}".format(
runtime_arg.name, runtime_type, stub_type
)
)
else:
if stub_arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'stub argument "{}" has a default value but runtime argument does not'.format(
stub_arg.variable.name
)
)
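# Illustrative sketch (editor addition, not part of stubtest): the runtime half
# of this check comes straight from inspect, where Parameter.empty means
# "no default value". For example:
def _example_runtime_defaults() -> Dict[str, bool]:
    def f(a, b=0, *, c, d=None):
        return (a, b, c, d)
    return {
        name: param.default is not inspect.Parameter.empty
        for name, param in inspect.signature(f).parameters.items()
    }  # -> {"a": False, "b": True, "c": False, "d": True}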
def maybe_strip_cls(name: str, args: List[nodes.Argument]) -> List[nodes.Argument]:
if name in ("__init_subclass__", "__class_getitem__"):
# These are implicitly classmethods. If the stub chooses not to have @classmethod, we
# should remove the cls argument
if args[0].variable.name == "cls":
return args[1:]
return args
class Signature(Generic[T]):
def __init__(self) -> None:
self.pos = [] # type: List[T]
self.kwonly = {} # type: Dict[str, T]
self.varpos = None # type: Optional[T]
self.varkw = None # type: Optional[T]
def __str__(self) -> str:
def get_name(arg: Any) -> str:
if isinstance(arg, inspect.Parameter):
return arg.name
if isinstance(arg, nodes.Argument):
return arg.variable.name
raise AssertionError
def get_type(arg: Any) -> Optional[str]:
if isinstance(arg, inspect.Parameter):
return None
if isinstance(arg, nodes.Argument):
return str(arg.variable.type or arg.type_annotation)
raise AssertionError
def has_default(arg: Any) -> bool:
if isinstance(arg, inspect.Parameter):
return arg.default != inspect.Parameter.empty
if isinstance(arg, nodes.Argument):
return arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT)
raise AssertionError
def get_desc(arg: Any) -> str:
arg_type = get_type(arg)
return (
get_name(arg)
+ (": {}".format(arg_type) if arg_type else "")
+ (" = ..." if has_default(arg) else "")
)
kw_only = sorted(self.kwonly.values(), key=lambda a: (has_default(a), get_name(a)))
ret = "def ("
ret += ", ".join(
[get_desc(arg) for arg in self.pos]
+ (["*" + get_name(self.varpos)] if self.varpos else (["*"] if self.kwonly else []))
+ [get_desc(arg) for arg in kw_only]
+ (["**" + get_name(self.varkw)] if self.varkw else [])
)
ret += ")"
return ret
@staticmethod
def from_funcitem(stub: nodes.FuncItem) -> "Signature[nodes.Argument]":
stub_sig = Signature() # type: Signature[nodes.Argument]
stub_args = maybe_strip_cls(stub.name, stub.arguments)
for stub_arg in stub_args:
if stub_arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
stub_sig.pos.append(stub_arg)
elif stub_arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
stub_sig.kwonly[stub_arg.variable.name] = stub_arg
elif stub_arg.kind == nodes.ARG_STAR:
stub_sig.varpos = stub_arg
elif stub_arg.kind == nodes.ARG_STAR2:
stub_sig.varkw = stub_arg
else:
raise AssertionError
return stub_sig
@staticmethod
def from_inspect_signature(signature: inspect.Signature) -> "Signature[inspect.Parameter]":
runtime_sig = Signature() # type: Signature[inspect.Parameter]
for runtime_arg in signature.parameters.values():
if runtime_arg.kind in (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
):
runtime_sig.pos.append(runtime_arg)
elif runtime_arg.kind == inspect.Parameter.KEYWORD_ONLY:
runtime_sig.kwonly[runtime_arg.name] = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_POSITIONAL:
runtime_sig.varpos = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_KEYWORD:
runtime_sig.varkw = runtime_arg
else:
raise AssertionError
return runtime_sig
@staticmethod
def from_overloadedfuncdef(stub: nodes.OverloadedFuncDef) -> "Signature[nodes.Argument]":
"""Returns a Signature from an OverloadedFuncDef.
If life were simple, to verify_overloadedfuncdef, we'd just verify_funcitem for each of its
items. Unfortunately, life isn't simple and overloads are pretty deceitful. So instead, we
try and combine the overload's items into a single signature that is compatible with any
lies it might try to tell.
"""
# For most dunder methods, just assume all args are positional-only
assume_positional_only = is_dunder(stub.name, exclude_special=True)
all_args = {} # type: Dict[str, List[Tuple[nodes.Argument, int]]]
for func in map(_resolve_funcitem_from_decorator, stub.items):
assert func is not None
args = maybe_strip_cls(stub.name, func.arguments)
for index, arg in enumerate(args):
# For positional-only args, we allow overloads to have different names for the same
# argument. To accomplish this, we just make up a fake index-based name.
name = (
"__{}".format(index)
if arg.variable.name.startswith("__") or assume_positional_only
else arg.variable.name
)
all_args.setdefault(name, []).append((arg, index))
def get_position(arg_name: str) -> int:
# We just need this to return the positional args in the correct order.
return max(index for _, index in all_args[arg_name])
def get_type(arg_name: str) -> mypy.types.ProperType:
with mypy.state.strict_optional_set(True):
all_types = [
arg.variable.type or arg.type_annotation for arg, _ in all_args[arg_name]
]
return mypy.typeops.make_simplified_union([t for t in all_types if t])
def get_kind(arg_name: str) -> int:
kinds = {arg.kind for arg, _ in all_args[arg_name]}
if nodes.ARG_STAR in kinds:
return nodes.ARG_STAR
if nodes.ARG_STAR2 in kinds:
return nodes.ARG_STAR2
# The logic here is based on two tenets:
# 1) If an arg is ever optional (or unspecified), it is optional
# 2) If an arg is ever positional, it is positional
is_opt = (
len(all_args[arg_name]) < len(stub.items)
or nodes.ARG_OPT in kinds
or nodes.ARG_NAMED_OPT in kinds
)
is_pos = nodes.ARG_OPT in kinds or nodes.ARG_POS in kinds
if is_opt:
return nodes.ARG_OPT if is_pos else nodes.ARG_NAMED_OPT
return nodes.ARG_POS if is_pos else nodes.ARG_NAMED
sig = Signature() # type: Signature[nodes.Argument]
for arg_name in sorted(all_args, key=get_position):
# example_arg_name gives us a real name (in case we had a fake index-based name)
example_arg_name = all_args[arg_name][0][0].variable.name
arg = nodes.Argument(
nodes.Var(example_arg_name, get_type(arg_name)),
type_annotation=None,
initializer=None,
kind=get_kind(arg_name),
)
if arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
sig.pos.append(arg)
elif arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
sig.kwonly[arg.variable.name] = arg
elif arg.kind == nodes.ARG_STAR:
sig.varpos = arg
elif arg.kind == nodes.ARG_STAR2:
sig.varkw = arg
else:
raise AssertionError
return sig
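# Illustrative sketch (editor addition, not part of stubtest): a concrete feel
# for the combination rule above. For stub overloads like
#
#     @overload
#     def get(key: str) -> str: ...
#     @overload
#     def get(key: str, default: str) -> str: ...
#
# "key" appears in every item, so it stays required, while "default" is missing
# from one item and becomes optional. Building the equivalent Signature by hand
# (no types attached, so they render as "None"):
def _example_combined_overload_signature() -> str:
    sig = Signature()  # type: Signature[nodes.Argument]
    sig.pos.append(nodes.Argument(nodes.Var("key"), type_annotation=None,
                                  initializer=None, kind=nodes.ARG_POS))
    sig.pos.append(nodes.Argument(nodes.Var("default"), type_annotation=None,
                                  initializer=None, kind=nodes.ARG_OPT))
    return str(sig)  # "def (key: None, default: None = ...)"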
def _verify_signature(
stub: Signature[nodes.Argument], runtime: Signature[inspect.Parameter], function_name: str
) -> Iterator[str]:
# Check positional arguments match up
for stub_arg, runtime_arg in zip(stub.pos, runtime.pos):
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
if (
runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY
and not stub_arg.variable.name.startswith("__")
and not stub_arg.variable.name.strip("_") == "self"
and not is_dunder(function_name, exclude_special=True) # noisy for dunder methods
):
yield (
'stub argument "{}" should be positional-only '
'(rename with a leading double underscore, i.e. "__{}")'.format(
stub_arg.variable.name, runtime_arg.name
)
)
if (
runtime_arg.kind != inspect.Parameter.POSITIONAL_ONLY
and stub_arg.variable.name.startswith("__")
):
yield (
'stub argument "{}" should be positional or keyword '
"(remove leading double underscore)".format(stub_arg.variable.name)
)
# Check unmatched positional args
if len(stub.pos) > len(runtime.pos):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *args. Hence, a) we can't check that the runtime actually takes those
# parameters and b) below, we don't enforce that the stub takes *args, since runtime logic
# may prevent those arguments from actually being accepted.
if runtime.varpos is None:
for stub_arg in stub.pos[len(runtime.pos):]:
# If the variable is in runtime.kwonly, it's just mislabelled as not a
# keyword-only argument
if stub_arg.variable.name not in runtime.kwonly:
yield 'runtime does not have argument "{}"'.format(stub_arg.variable.name)
else:
yield 'stub argument "{}" is not keyword-only'.format(stub_arg.variable.name)
if stub.varpos is not None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
elif len(stub.pos) < len(runtime.pos):
for runtime_arg in runtime.pos[len(stub.pos):]:
if runtime_arg.name not in stub.kwonly:
yield 'stub does not have argument "{}"'.format(runtime_arg.name)
else:
yield 'runtime argument "{}" is not keyword-only'.format(runtime_arg.name)
# Checks involving *args
if len(stub.pos) <= len(runtime.pos) or runtime.varpos is None:
if stub.varpos is None and runtime.varpos is not None:
yield 'stub does not have *args argument "{}"'.format(runtime.varpos.name)
if stub.varpos is not None and runtime.varpos is None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
# Check keyword-only args
for arg in sorted(set(stub.kwonly) & set(runtime.kwonly)):
stub_arg, runtime_arg = stub.kwonly[arg], runtime.kwonly[arg]
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
# Check unmatched keyword-only args
if runtime.varkw is None or not set(runtime.kwonly).issubset(set(stub.kwonly)):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *kwargs. Hence, a) we only check if the runtime actually takes those
# parameters when the above condition holds and b) below, we don't enforce that the stub
# takes *kwargs, since runtime logic may prevent additional arguments from actually being
# accepted.
for arg in sorted(set(stub.kwonly) - set(runtime.kwonly)):
yield 'runtime does not have argument "{}"'.format(arg)
for arg in sorted(set(runtime.kwonly) - set(stub.kwonly)):
if arg in set(stub_arg.variable.name for stub_arg in stub.pos):
# Don't report this if we've reported it before
if len(stub.pos) > len(runtime.pos) and runtime.varpos is not None:
yield 'stub argument "{}" is not keyword-only'.format(arg)
else:
yield 'stub does not have argument "{}"'.format(arg)
# Checks involving **kwargs
if stub.varkw is None and runtime.varkw is not None:
# As mentioned above, don't enforce that the stub takes **kwargs.
# Also check against positional parameters, to avoid a nitpicky message when an argument
# isn't marked as keyword-only
stub_pos_names = set(stub_arg.variable.name for stub_arg in stub.pos)
# Ideally we'd do a strict subset check, but in practice the errors from that aren't useful
if not set(runtime.kwonly).issubset(set(stub.kwonly) | stub_pos_names):
yield 'stub does not have **kwargs argument "{}"'.format(runtime.varkw.name)
if stub.varkw is not None and runtime.varkw is None:
yield 'runtime does not have **kwargs argument "{}"'.format(stub.varkw.variable.name)
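# Illustrative sketch (editor addition, not part of stubtest): comparing a
# hand-built stub-side Signature against a real runtime function. The stub
# below only declares "x", so the runtime's extra "y" is reported.
def _example_signature_mismatch() -> List[str]:
    def runtime_func(x, y=1):
        return x + y
    stub_sig = Signature()  # type: Signature[nodes.Argument]
    stub_sig.pos.append(nodes.Argument(nodes.Var("x"), type_annotation=None,
                                       initializer=None, kind=nodes.ARG_POS))
    runtime_sig = Signature.from_inspect_signature(inspect.signature(runtime_func))
    return list(_verify_signature(stub_sig, runtime_sig, function_name="runtime_func"))
    # -> ['stub does not have argument "y"']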
@verify.register(nodes.FuncItem)
def verify_funcitem(
stub: nodes.FuncItem, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except (ValueError, RuntimeError):
# inspect.signature throws sometimes
# catch RuntimeError because of https://bugs.python.org/issue39504
return
stub_sig = Signature.from_funcitem(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
runtime_desc="def " + str(signature),
)
@verify.register(Missing)
def verify_none(
stub: Missing, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
yield Error(object_path, "is not present in stub", stub, runtime)
@verify.register(nodes.Var)
def verify_var(
stub: nodes.Var, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
# Don't always yield an error here, because we often can't find instance variables
if len(object_path) <= 2:
yield Error(object_path, "is not present at runtime", stub, runtime)
return
runtime_type = get_mypy_type_of_runtime_value(runtime)
if (
runtime_type is not None
and stub.type is not None
and not is_subtype_helper(runtime_type, stub.type)
):
should_error = True
# Avoid errors when defining enums, since runtime_type is the enum itself, but we'd
# annotate it with the type of runtime.value
if isinstance(runtime, enum.Enum):
runtime_type = get_mypy_type_of_runtime_value(runtime.value)
if runtime_type is not None and is_subtype_helper(runtime_type, stub.type):
should_error = False
if should_error:
yield Error(
object_path,
"variable differs from runtime type {}".format(runtime_type),
stub,
runtime,
)
@verify.register(nodes.OverloadedFuncDef)
def verify_overloadedfuncdef(
stub: nodes.OverloadedFuncDef, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.is_property:
# We get here in cases of overloads from property.setter
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except ValueError:
return
stub_sig = Signature.from_overloadedfuncdef(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
# TODO: This is a little hacky, but the addition here is super useful
if "has a default value of type" in message:
message += (
". This is often caused by overloads failing to account for explicitly passing "
"in the default value."
)
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
stub_desc=str(stub.type) + "\nInferred signature: {}".format(stub_sig),
runtime_desc="def " + str(signature),
)
@verify.register(nodes.TypeVarExpr)
def verify_typevarexpr(
stub: nodes.TypeVarExpr, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
# TypeVar expressions have no runtime counterpart worth checking here; the
# dead branch below only exists to make this function a generator, as the
# Iterator[Error] return type requires.
if False:
yield None
def _verify_property(stub: nodes.Decorator, runtime: Any) -> Iterator[str]:
assert stub.func.is_property
if isinstance(runtime, property):
return
if inspect.isdatadescriptor(runtime):
# It's enough like a property...
return
# Sometimes attributes pretend to be properties, for instance, to express that they
# are read only. So allowlist if runtime_type matches the return type of stub.
runtime_type = get_mypy_type_of_runtime_value(runtime)
func_type = (
stub.func.type.ret_type if isinstance(stub.func.type, mypy.types.CallableType) else None
)
if (
runtime_type is not None
and func_type is not None
and is_subtype_helper(runtime_type, func_type)
):
return
yield "is inconsistent, cannot reconcile @property on stub with runtime object"
def _resolve_funcitem_from_decorator(dec: nodes.OverloadPart) -> Optional[nodes.FuncItem]:
"""Returns a FuncItem that corresponds to the output of the decorator.
Returns None if we can't figure out what that would be. For convenience, this function also
accepts FuncItems.
"""
if isinstance(dec, nodes.FuncItem):
return dec
if dec.func.is_property:
return None
def apply_decorator_to_funcitem(
decorator: nodes.Expression, func: nodes.FuncItem
) -> Optional[nodes.FuncItem]:
if not isinstance(decorator, nodes.RefExpr):
return None
if decorator.fullname is None:
# Happens with namedtuple
return None
if decorator.fullname in (
"builtins.staticmethod",
"typing.overload",
"abc.abstractmethod",
):
return func
if decorator.fullname == "builtins.classmethod":
assert func.arguments[0].variable.name in ("cls", "metacls")
ret = copy.copy(func)
# Remove the cls argument, since it's not present in inspect.signature of classmethods
ret.arguments = ret.arguments[1:]
return ret
# Just give up on any other decorators. After excluding properties, we don't run into
# anything else when running on typeshed's stdlib.
return None
func = dec.func # type: nodes.FuncItem
for decorator in dec.original_decorators:
resulting_func = apply_decorator_to_funcitem(decorator, func)
if resulting_func is None:
return None
func = resulting_func
return func
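# Illustrative sketch (editor addition, not part of stubtest): the classmethod
# branch above drops "cls" because inspect.signature never reports it for a
# classmethod looked up on the class.
def _example_classmethod_signature_has_no_cls() -> List[str]:
    class Demo:
        @classmethod
        def make(cls, name: str) -> "Demo":
            return cls()
    return list(inspect.signature(Demo.make).parameters)  # -> ["name"]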
@verify.register(nodes.Decorator)
def verify_decorator(
stub: nodes.Decorator, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.func.is_property:
for message in _verify_property(stub, runtime):
yield Error(object_path, message, stub, runtime)
return
func = _resolve_funcitem_from_decorator(stub)
if func is not None:
yield from verify(func, runtime, object_path)
@verify.register(nodes.TypeAlias)
def verify_typealias(
stub: nodes.TypeAlias, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
# Type aliases are not currently checked against the runtime; as above, the
# dead branch just keeps this function a generator.
if False:
yield None
SPECIAL_DUNDERS = ("__init__", "__new__", "__call__", "__init_subclass__", "__class_getitem__")
def is_dunder(name: str, exclude_special: bool = False) -> bool:
"""Returns whether name is a dunder name.
:param exclude_special: Whether to return False for a couple special dunder methods.
"""
if exclude_special and name in SPECIAL_DUNDERS:
return False
return name.startswith("__") and name.endswith("__")
def is_subtype_helper(left: mypy.types.Type, right: mypy.types.Type) -> bool:
"""Checks whether ``left`` is a subtype of ``right``."""
left = mypy.types.get_proper_type(left)
right = mypy.types.get_proper_type(right)
if (
isinstance(left, mypy.types.LiteralType)
and isinstance(left.value, int)
and left.value in (0, 1)
and isinstance(right, mypy.types.Instance)
and right.type.fullname == "builtins.bool"
):
# Pretend Literal[0, 1] is a subtype of bool to avoid unhelpful errors.
return True
with mypy.state.strict_optional_set(True):
return mypy.subtypes.is_subtype(left, right)
def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]:
"""Returns a mypy type object representing the type of ``runtime``.
Returns None if we can't find something that works.
"""
if runtime is None:
return mypy.types.NoneType()
if isinstance(runtime, property):
# Give up on properties to avoid issues with things that are typed as attributes.
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(
runtime,
(types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
):
builtins = get_stub("builtins")
assert builtins is not None
type_info = builtins.names["function"].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
try:
signature = inspect.signature(runtime)
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append(
None if arg.kind == inspect.Parameter.POSITIONAL_ONLY else arg.name
)
# inspect uses Parameter.empty to mean "no default value", so this flag is
# True for required parameters.
required = arg.default == inspect.Parameter.empty
if arg.kind == inspect.Parameter.POSITIONAL_ONLY:
arg_kinds.append(nodes.ARG_POS if required else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
arg_kinds.append(nodes.ARG_POS if required else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.KEYWORD_ONLY:
arg_kinds.append(nodes.ARG_NAMED if required else nodes.ARG_NAMED_OPT)
elif arg.kind == inspect.Parameter.VAR_POSITIONAL:
arg_kinds.append(nodes.ARG_STAR)
elif arg.kind == inspect.Parameter.VAR_KEYWORD:
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
except ValueError:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(
arg_types,
arg_kinds,
arg_names,
ret_type=anytype(),
fallback=fallback,
is_ellipsis_args=True,
)
# Try and look up a stub for the runtime object
stub = get_stub(type(runtime).__module__)
if stub is None:
return None
type_name = type(runtime).__name__
if type_name not in stub.names:
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if not isinstance(type_info, nodes.TypeInfo):
return None
if isinstance(runtime, tuple):
# Special case tuples so we construct a valid mypy.types.TupleType
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if i is not None else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
try:
# Literals are supposed to be only bool, int, str, bytes or enums, but this seems to work
# well (when not using mypyc, for which bytes and enums are also problematic).
return mypy.types.LiteralType(
value=runtime,
fallback=fallback,
)
except TypeError:
# Ask for forgiveness if we're using mypyc.
return fallback
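# Illustrative sketch (editor addition, not part of stubtest): the two easy
# cases need no stubs at all, while richer values (ints, strings, functions)
# require build_stubs() to have run first, since the inference leans on the
# builtins stub.
def _example_easy_runtime_value_types() -> Tuple[mypy.types.Type, Optional[mypy.types.Type]]:
    none_type = get_mypy_type_of_runtime_value(None)           # mypy.types.NoneType()
    prop_type = get_mypy_type_of_runtime_value(property(str))  # None: properties are skipped
    return none_type, prop_type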
_all_stubs = {} # type: Dict[str, nodes.MypyFile]
def build_stubs(modules: List[str], options: Options, find_submodules: bool = False) -> List[str]:
"""Uses mypy to construct stub objects for the given modules.
This sets global state that ``get_stub`` can access.
Returns all modules we might want to check. If ``find_submodules`` is False, this is equal
to ``modules``.
:param modules: List of modules to build stubs for.
:param options: Mypy options for finding and building stubs.
:param find_submodules: Whether to attempt to find submodules of the given modules as well.
"""
data_dir = mypy.build.default_data_dir()
search_path = mypy.modulefinder.compute_search_paths([], options, data_dir)
find_module_cache = mypy.modulefinder.FindModuleCache(
search_path, fscache=None, options=options
)
all_modules = []
sources = []
for module in modules:
all_modules.append(module)
if not find_submodules:
module_path = find_module_cache.find_module(module)
if not isinstance(module_path, str):
# test_module will yield an error later when it can't find stubs
continue
sources.append(mypy.modulefinder.BuildSource(module_path, module, None))
else:
found_sources = find_module_cache.find_modules_recursive(module)
sources.extend(found_sources)
all_modules.extend(s.module for s in found_sources if s.module not in all_modules)
try:
res = mypy.build.build(sources=sources, options=options)
except mypy.errors.CompileError as e:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to failed mypy compile:\n",
str(e),
]
print("".join(output))
raise RuntimeError from e
if res.errors:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to mypy build errors:\n",
]
print("".join(output) + "\n".join(res.errors))
raise RuntimeError
global _all_stubs
_all_stubs = res.files
return all_modules
def get_stub(module: str) -> Optional[nodes.MypyFile]:
"""Returns a stub object for the given module, if we've built one."""
return _all_stubs.get(module)
def get_typeshed_stdlib_modules(custom_typeshed_dir: Optional[str]) -> List[str]:
"""Returns a list of stdlib modules in typeshed (for current Python version)."""
stdlib_py_versions = mypy.modulefinder.load_stdlib_py_versions(custom_typeshed_dir)
packages = set()
# Typeshed doesn't cover Python 3.5.
if sys.version_info < (3, 6):
version_info = (3, 6)
else:
version_info = sys.version_info[0:2]
for module, versions in stdlib_py_versions.items():
minver, maxver = versions
if version_info >= minver and (maxver is None or version_info <= maxver):
packages.add(module)
if custom_typeshed_dir:
typeshed_dir = Path(custom_typeshed_dir)
else:
typeshed_dir = Path(mypy.build.default_data_dir()) / "typeshed"
stdlib_dir = typeshed_dir / "stdlib"
modules = []
for path in stdlib_dir.rglob("*.pyi"):
if path.stem == "__init__":
path = path.parent
module = ".".join(path.relative_to(stdlib_dir).parts[:-1] + (path.stem,))
if module.split(".")[0] in packages:
modules.append(module)
return sorted(modules)
def get_allowlist_entries(allowlist_file: str) -> Iterator[str]:
def strip_comments(s: str) -> str:
try:
return s[: s.index("#")].strip()
except ValueError:
return s.strip()
with open(allowlist_file) as f:
for line in f.readlines():
entry = strip_comments(line)
if entry:
yield entry
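# Illustrative sketch (editor addition, not part of stubtest): allowlist files
# are plain text, one entry per line, with "#" comments stripped; test_stubs()
# below treats each entry as a regex that must fullmatch Error.object_desc.
# The file name here is arbitrary.
def _example_allowlist_round_trip(path: str = "example_allowlist.txt") -> List[str]:
    with open(path, "w") as f:
        f.write("os.path.exists  # noisy on this platform\n")
        f.write("collections\\..*Dict\n")  # regex entry
        f.write("\n")  # blank lines are ignored
    return list(get_allowlist_entries(path))  # -> ["os.path.exists", "collections\\..*Dict"]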
def test_stubs(args: argparse.Namespace, use_builtins_fixtures: bool = False) -> int:
"""This is stubtest! It's time to test the stubs!"""
# Load the allowlist. This is a series of strings corresponding to Error.object_desc
# Values in the dict will store whether we used the allowlist entry or not.
allowlist = {
entry: False
for allowlist_file in args.allowlist
for entry in get_allowlist_entries(allowlist_file)
}
allowlist_regexes = {entry: re.compile(entry) for entry in allowlist}
# If we need to generate an allowlist, we store Error.object_desc for each error here.
generated_allowlist = set()
modules = args.modules
if args.check_typeshed:
assert not args.modules, "Cannot pass both --check-typeshed and a list of modules"
modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir)
annoying_modules = {"antigravity", "this"}
modules = [m for m in modules if m not in annoying_modules]
assert modules, "No modules to check"
options = Options()
options.incremental = False
options.custom_typeshed_dir = args.custom_typeshed_dir
options.config_file = args.mypy_config_file
options.use_builtins_fixtures = use_builtins_fixtures
if options.config_file:
def set_strict_flags() -> None: # not needed yet
return
parse_config_file(options, set_strict_flags, options.config_file, sys.stdout, sys.stderr)
try:
modules = build_stubs(modules, options, find_submodules=not args.check_typeshed)
except RuntimeError:
return 1
exit_code = 0
for module in modules:
for error in test_module(module):
# Filter errors
if args.ignore_missing_stub and error.is_missing_stub():
continue
if args.ignore_positional_only and error.is_positional_only_related():
continue
if error.object_desc in allowlist:
allowlist[error.object_desc] = True
continue
is_allowlisted = False
for w in allowlist:
if allowlist_regexes[w].fullmatch(error.object_desc):
allowlist[w] = True
is_allowlisted = True
break
if is_allowlisted:
continue
# We have errors, so change exit code, and output whatever necessary
exit_code = 1
if args.generate_allowlist:
generated_allowlist.add(error.object_desc)
continue
print(error.get_description(concise=args.concise))
# Print unused allowlist entries
if not args.ignore_unused_allowlist:
for w in allowlist:
# Don't consider an entry unused if it regex-matches the empty string
# This lets us allowlist errors that don't manifest at all on some systems
if not allowlist[w] and not allowlist_regexes[w].fullmatch(""):
exit_code = 1
print("note: unused allowlist entry {}".format(w))
# Print the generated allowlist
if args.generate_allowlist:
for e in sorted(generated_allowlist):
print(e)
exit_code = 0
return exit_code
def parse_options(args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Compares stubs to objects introspected from the runtime."
)
parser.add_argument("modules", nargs="*", help="Modules to test")
parser.add_argument("--concise", action="store_true", help="Make output concise")
parser.add_argument(
"--ignore-missing-stub",
action="store_true",
help="Ignore errors for stub missing things that are present at runtime",
)
parser.add_argument(
"--ignore-positional-only",
action="store_true",
help="Ignore errors for whether an argument should or shouldn't be positional-only",
)
parser.add_argument(
"--custom-typeshed-dir", metavar="DIR", help="Use the custom typeshed in DIR"
)
parser.add_argument(
"--check-typeshed", action="store_true", help="Check all stdlib modules in typeshed"
)
parser.add_argument(
"--allowlist",
"--whitelist",
action="append",
metavar="FILE",
default=[],
help=(
"Use file as an allowlist. Can be passed multiple times to combine multiple "
"allowlists. Allowlists can be created with --generate-allowlist"
),
)
parser.add_argument(
"--generate-allowlist",
"--generate-whitelist",
action="store_true",
help="Print an allowlist (to stdout) to be used with --allowlist",
)
parser.add_argument(
"--ignore-unused-allowlist",
"--ignore-unused-whitelist",
action="store_true",
help="Ignore unused allowlist entries",
)
config_group = parser.add_argument_group(
title='mypy config file',
description="Use a config file instead of command line arguments. "
"Plugins and mypy path are the only supported "
"configurations.",
)
config_group.add_argument(
'--mypy-config-file',
help=(
"An existing mypy configuration file, currently used by stubtest to help "
"determine mypy path and plugins"
),
)
return parser.parse_args(args)
def main() -> int:
mypy.util.check_python_version("stubtest")
return test_stubs(parse_options(sys.argv[1:]))
if __name__ == "__main__":
sys.exit(main())
|
build_stubs
|
Uses mypy to construct stub objects for the given modules.
This sets global state that ``get_stub`` can access.
Returns all modules we might want to check. If ``find_submodules`` is False, this is equal
to ``modules``.
:param modules: List of modules to build stubs for.
:param options: Mypy options for finding and building stubs.
:param find_submodules: Whether to attempt to find submodules of the given modules as well.
|
"""Tests for stubs.
Verify that various things in stubs are consistent with how things behave at runtime.
"""
import argparse
import copy
import enum
import importlib
import inspect
import re
import sys
import types
import warnings
from functools import singledispatch
from pathlib import Path
from typing import Any, Dict, Generic, Iterator, List, Optional, Tuple, TypeVar, Union, cast
from typing_extensions import Type
import mypy.build
import mypy.modulefinder
import mypy.types
from mypy import nodes
from mypy.config_parser import parse_config_file
from mypy.options import Options
from mypy.util import FancyFormatter
class Missing:
"""Marker object for things that are missing (from a stub or the runtime)."""
def __repr__(self) -> str:
return "MISSING"
MISSING = Missing()
T = TypeVar("T")
if sys.version_info >= (3, 5, 3):
MaybeMissing = Union[T, Missing]
else:
# work around a bug in 3.5.2 and earlier's typing.py
class MaybeMissingMeta(type):
def __getitem__(self, arg: Any) -> Any:
return Union[arg, Missing]
class MaybeMissing(metaclass=MaybeMissingMeta): # type: ignore
pass
_formatter = FancyFormatter(sys.stdout, sys.stderr, False)
def _style(message: str, **kwargs: Any) -> str:
"""Wrapper around mypy.util for fancy formatting."""
kwargs.setdefault("color", "none")
return _formatter.style(message, **kwargs)
class Error:
def __init__(
self,
object_path: List[str],
message: str,
stub_object: MaybeMissing[nodes.Node],
runtime_object: MaybeMissing[Any],
*,
stub_desc: Optional[str] = None,
runtime_desc: Optional[str] = None
) -> None:
"""Represents an error found by stubtest.
:param object_path: Location of the object with the error,
e.g. ``["module", "Class", "method"]``
:param message: Error message
:param stub_object: The mypy node representing the stub
:param runtime_object: Actual object obtained from the runtime
:param stub_desc: Specialised description for the stub object, should you wish
:param runtime_desc: Specialised description for the runtime object, should you wish
"""
self.object_desc = ".".join(object_path)
self.message = message
self.stub_object = stub_object
self.runtime_object = runtime_object
self.stub_desc = stub_desc or str(getattr(stub_object, "type", stub_object))
self.runtime_desc = runtime_desc or str(runtime_object)
def is_missing_stub(self) -> bool:
"""Whether or not the error is for something missing from the stub."""
return isinstance(self.stub_object, Missing)
def is_positional_only_related(self) -> bool:
"""Whether or not the error is for something being (or not being) positional-only."""
# TODO: This is hacky, use error codes or something more resilient
return "leading double underscore" in self.message
def get_description(self, concise: bool = False) -> str:
"""Returns a description of the error.
:param concise: Whether to return a concise, one-line description
"""
if concise:
return _style(self.object_desc, bold=True) + " " + self.message
stub_line = None
stub_file = None # type: None
if not isinstance(self.stub_object, Missing):
stub_line = self.stub_object.line
# TODO: Find a way of getting the stub file
stub_loc_str = ""
if stub_line:
stub_loc_str += " at line {}".format(stub_line)
if stub_file:
stub_loc_str += " in file {}".format(Path(stub_file))
runtime_line = None
runtime_file = None
if not isinstance(self.runtime_object, Missing):
try:
runtime_line = inspect.getsourcelines(self.runtime_object)[1]
except (OSError, TypeError):
pass
try:
runtime_file = inspect.getsourcefile(self.runtime_object)
except TypeError:
pass
runtime_loc_str = ""
if runtime_line:
runtime_loc_str += " at line {}".format(runtime_line)
if runtime_file:
runtime_loc_str += " in file {}".format(Path(runtime_file))
output = [
_style("error: ", color="red", bold=True),
_style(self.object_desc, bold=True),
" ",
self.message,
"\n",
"Stub:",
_style(stub_loc_str, dim=True),
"\n",
_style(self.stub_desc + "\n", color="blue", dim=True),
"Runtime:",
_style(runtime_loc_str, dim=True),
"\n",
_style(self.runtime_desc + "\n", color="blue", dim=True),
]
return "".join(output)
def test_module(module_name: str) -> Iterator[Error]:
"""Tests a given module's stub against introspecting it at runtime.
Requires the stub to have been built already, accomplished by a call to ``build_stubs``.
:param module_name: The module to test
"""
stub = get_stub(module_name)
if stub is None:
yield Error([module_name], "failed to find stubs", MISSING, None)
return
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
runtime = importlib.import_module(module_name)
except Exception as e:
yield Error([module_name], "failed to import: {}".format(e), stub, MISSING)
return
with warnings.catch_warnings():
warnings.simplefilter("ignore")
yield from verify(stub, runtime, [module_name])
@singledispatch
def verify(
stub: nodes.Node, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
"""Entry point for comparing a stub to a runtime object.
We use single dispatch based on the type of ``stub``.
:param stub: The mypy node representing a part of the stub
:param runtime: The runtime object corresponding to ``stub``
"""
yield Error(object_path, "is an unknown mypy node", stub, runtime)
@verify.register(nodes.MypyFile)
def verify_mypyfile(
stub: nodes.MypyFile, runtime: MaybeMissing[types.ModuleType], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if not isinstance(runtime, types.ModuleType):
yield Error(object_path, "is not a module", stub, runtime)
return
# Check things in the stub that are public
to_check = set(
m
for m, o in stub.names.items()
if o.module_public and (not m.startswith("_") or hasattr(runtime, m))
)
runtime_public_contents = [
m
for m in dir(runtime)
if not m.startswith("_")
# Ensure that the object's module is `runtime`, e.g. so that we don't pick up reexported
# modules and infinitely recurse. Unfortunately, there's no way to detect an explicit
# reexport missing from the stubs (that isn't specified in __all__)
and getattr(getattr(runtime, m), "__module__", None) == runtime.__name__
]
# Check all things declared in module's __all__, falling back to runtime_public_contents
to_check.update(getattr(runtime, "__all__", runtime_public_contents))
to_check.difference_update({"__file__", "__doc__", "__name__", "__builtins__", "__package__"})
for entry in sorted(to_check):
yield from verify(
stub.names[entry].node if entry in stub.names else MISSING,
getattr(runtime, entry, MISSING),
object_path + [entry],
)
@verify.register(nodes.TypeInfo)
def verify_typeinfo(
stub: nodes.TypeInfo, runtime: MaybeMissing[Type[Any]], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime, stub_desc=repr(stub))
return
if not isinstance(runtime, type):
yield Error(object_path, "is not a type", stub, runtime, stub_desc=repr(stub))
return
# Check everything already defined in the stub
to_check = set(stub.names)
# There's a reasonable case to be made that we should always check all dunders, but it's
# currently quite noisy. We could turn this into a denylist instead of an allowlist.
to_check.update(
# cast to workaround mypyc complaints
m for m in cast(Any, vars)(runtime) if not m.startswith("_") or m in SPECIAL_DUNDERS
)
for entry in sorted(to_check):
mangled_entry = entry
if entry.startswith("__") and not entry.endswith("__"):
mangled_entry = "_{}{}".format(stub.name, entry)
yield from verify(
next((t.names[entry].node for t in stub.mro if entry in t.names), MISSING),
getattr(runtime, mangled_entry, MISSING),
object_path + [entry],
)
def _verify_static_class_methods(
stub: nodes.FuncBase, runtime: Any, object_path: List[str]
) -> Iterator[str]:
if stub.name in ("__new__", "__init_subclass__", "__class_getitem__"):
# Special cased by Python, so don't bother checking
return
if inspect.isbuiltin(runtime):
# The isinstance checks don't work reliably for builtins, e.g. datetime.datetime.now, so do
# something a little hacky that seems to work well
probably_class_method = isinstance(getattr(runtime, "__self__", None), type)
if probably_class_method and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not probably_class_method and stub.is_class:
yield "stub is a classmethod but runtime is not"
return
# Look the object up statically, to avoid binding by the descriptor protocol
static_runtime = importlib.import_module(object_path[0])
for entry in object_path[1:]:
try:
static_runtime = inspect.getattr_static(static_runtime, entry)
except AttributeError:
# This can happen with mangled names, ignore for now.
# TODO: pass more information about ancestors of nodes/objects to verify, so we don't
# have to do this hacky lookup. Would be useful in a couple other places too.
return
if isinstance(static_runtime, classmethod) and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not isinstance(static_runtime, classmethod) and stub.is_class:
yield "stub is a classmethod but runtime is not"
if isinstance(static_runtime, staticmethod) and not stub.is_static:
yield "runtime is a staticmethod but stub is not"
if not isinstance(static_runtime, staticmethod) and stub.is_static:
yield "stub is a staticmethod but runtime is not"
def _verify_arg_name(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter, function_name: str
) -> Iterator[str]:
"""Checks whether argument names match."""
# Ignore exact names for most dunder methods
if is_dunder(function_name, exclude_special=True):
return
def strip_prefix(s: str, prefix: str) -> str:
return s[len(prefix):] if s.startswith(prefix) else s
if strip_prefix(stub_arg.variable.name, "__") == runtime_arg.name:
return
def names_approx_match(a: str, b: str) -> bool:
a = a.strip("_")
b = b.strip("_")
return a.startswith(b) or b.startswith(a) or len(a) == 1 or len(b) == 1
# Be more permissive about names matching for positional-only arguments
if runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY and names_approx_match(
stub_arg.variable.name, runtime_arg.name
):
return
# This comes up with namedtuples, so ignore
if stub_arg.variable.name == "_self":
return
yield (
'stub argument "{}" differs from runtime argument "{}"'.format(
stub_arg.variable.name, runtime_arg.name
)
)
def _verify_arg_default_value(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter
) -> Iterator[str]:
"""Checks whether argument default values are compatible."""
if runtime_arg.default != inspect.Parameter.empty:
if stub_arg.kind not in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'runtime argument "{}" has a default value but stub argument does not'.format(
runtime_arg.name
)
)
else:
runtime_type = get_mypy_type_of_runtime_value(runtime_arg.default)
# Fallback to the type annotation type if var type is missing. The type annotation
# is an UnboundType, but I don't know enough to know what the pros and cons here are.
# UnboundTypes have ugly question marks following them, so default to var type.
# Note we do this same fallback when constructing signatures in from_overloadedfuncdef
stub_type = stub_arg.variable.type or stub_arg.type_annotation
if isinstance(stub_type, mypy.types.TypeVarType):
stub_type = stub_type.upper_bound
if (
runtime_type is not None
and stub_type is not None
# Avoid false positives for marker objects
and type(runtime_arg.default) != object
and not is_subtype_helper(runtime_type, stub_type)
):
yield (
'runtime argument "{}" has a default value of type {}, '
"which is incompatible with stub argument type {}".format(
runtime_arg.name, runtime_type, stub_type
)
)
else:
if stub_arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'stub argument "{}" has a default value but runtime argument does not'.format(
stub_arg.variable.name
)
)
def maybe_strip_cls(name: str, args: List[nodes.Argument]) -> List[nodes.Argument]:
if name in ("__init_subclass__", "__class_getitem__"):
# These are implicitly classmethods. If the stub chooses not to have @classmethod, we
# should remove the cls argument
if args[0].variable.name == "cls":
return args[1:]
return args
class Signature(Generic[T]):
def __init__(self) -> None:
self.pos = [] # type: List[T]
self.kwonly = {} # type: Dict[str, T]
self.varpos = None # type: Optional[T]
self.varkw = None # type: Optional[T]
def __str__(self) -> str:
def get_name(arg: Any) -> str:
if isinstance(arg, inspect.Parameter):
return arg.name
if isinstance(arg, nodes.Argument):
return arg.variable.name
raise AssertionError
def get_type(arg: Any) -> Optional[str]:
if isinstance(arg, inspect.Parameter):
return None
if isinstance(arg, nodes.Argument):
return str(arg.variable.type or arg.type_annotation)
raise AssertionError
def has_default(arg: Any) -> bool:
if isinstance(arg, inspect.Parameter):
return arg.default != inspect.Parameter.empty
if isinstance(arg, nodes.Argument):
return arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT)
raise AssertionError
def get_desc(arg: Any) -> str:
arg_type = get_type(arg)
return (
get_name(arg)
+ (": {}".format(arg_type) if arg_type else "")
+ (" = ..." if has_default(arg) else "")
)
kw_only = sorted(self.kwonly.values(), key=lambda a: (has_default(a), get_name(a)))
ret = "def ("
ret += ", ".join(
[get_desc(arg) for arg in self.pos]
+ (["*" + get_name(self.varpos)] if self.varpos else (["*"] if self.kwonly else []))
+ [get_desc(arg) for arg in kw_only]
+ (["**" + get_name(self.varkw)] if self.varkw else [])
)
ret += ")"
return ret
@staticmethod
def from_funcitem(stub: nodes.FuncItem) -> "Signature[nodes.Argument]":
stub_sig = Signature() # type: Signature[nodes.Argument]
stub_args = maybe_strip_cls(stub.name, stub.arguments)
for stub_arg in stub_args:
if stub_arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
stub_sig.pos.append(stub_arg)
elif stub_arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
stub_sig.kwonly[stub_arg.variable.name] = stub_arg
elif stub_arg.kind == nodes.ARG_STAR:
stub_sig.varpos = stub_arg
elif stub_arg.kind == nodes.ARG_STAR2:
stub_sig.varkw = stub_arg
else:
raise AssertionError
return stub_sig
@staticmethod
def from_inspect_signature(signature: inspect.Signature) -> "Signature[inspect.Parameter]":
runtime_sig = Signature() # type: Signature[inspect.Parameter]
for runtime_arg in signature.parameters.values():
if runtime_arg.kind in (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
):
runtime_sig.pos.append(runtime_arg)
elif runtime_arg.kind == inspect.Parameter.KEYWORD_ONLY:
runtime_sig.kwonly[runtime_arg.name] = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_POSITIONAL:
runtime_sig.varpos = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_KEYWORD:
runtime_sig.varkw = runtime_arg
else:
raise AssertionError
return runtime_sig
@staticmethod
def from_overloadedfuncdef(stub: nodes.OverloadedFuncDef) -> "Signature[nodes.Argument]":
"""Returns a Signature from an OverloadedFuncDef.
If life were simple, to verify_overloadedfuncdef, we'd just verify_funcitem for each of its
items. Unfortunately, life isn't simple and overloads are pretty deceitful. So instead, we
try and combine the overload's items into a single signature that is compatible with any
lies it might try to tell.
"""
# For most dunder methods, just assume all args are positional-only
assume_positional_only = is_dunder(stub.name, exclude_special=True)
all_args = {} # type: Dict[str, List[Tuple[nodes.Argument, int]]]
for func in map(_resolve_funcitem_from_decorator, stub.items):
assert func is not None
args = maybe_strip_cls(stub.name, func.arguments)
for index, arg in enumerate(args):
# For positional-only args, we allow overloads to have different names for the same
# argument. To accomplish this, we just make up a fake index-based name.
name = (
"__{}".format(index)
if arg.variable.name.startswith("__") or assume_positional_only
else arg.variable.name
)
all_args.setdefault(name, []).append((arg, index))
def get_position(arg_name: str) -> int:
# We just need this to return the positional args in the correct order.
return max(index for _, index in all_args[arg_name])
def get_type(arg_name: str) -> mypy.types.ProperType:
with mypy.state.strict_optional_set(True):
all_types = [
arg.variable.type or arg.type_annotation for arg, _ in all_args[arg_name]
]
return mypy.typeops.make_simplified_union([t for t in all_types if t])
def get_kind(arg_name: str) -> int:
kinds = {arg.kind for arg, _ in all_args[arg_name]}
if nodes.ARG_STAR in kinds:
return nodes.ARG_STAR
if nodes.ARG_STAR2 in kinds:
return nodes.ARG_STAR2
# The logic here is based on two tenets:
# 1) If an arg is ever optional (or unspecified), it is optional
# 2) If an arg is ever positional, it is positional
is_opt = (
len(all_args[arg_name]) < len(stub.items)
or nodes.ARG_OPT in kinds
or nodes.ARG_NAMED_OPT in kinds
)
is_pos = nodes.ARG_OPT in kinds or nodes.ARG_POS in kinds
if is_opt:
return nodes.ARG_OPT if is_pos else nodes.ARG_NAMED_OPT
return nodes.ARG_POS if is_pos else nodes.ARG_NAMED
sig = Signature() # type: Signature[nodes.Argument]
for arg_name in sorted(all_args, key=get_position):
# example_arg_name gives us a real name (in case we had a fake index-based name)
example_arg_name = all_args[arg_name][0][0].variable.name
arg = nodes.Argument(
nodes.Var(example_arg_name, get_type(arg_name)),
type_annotation=None,
initializer=None,
kind=get_kind(arg_name),
)
if arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
sig.pos.append(arg)
elif arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
sig.kwonly[arg.variable.name] = arg
elif arg.kind == nodes.ARG_STAR:
sig.varpos = arg
elif arg.kind == nodes.ARG_STAR2:
sig.varkw = arg
else:
raise AssertionError
return sig
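# Illustrative example (hypothetical overloads, not taken from any real stub): combining
#     @overload
#     def get(self, key: int) -> str: ...
#     @overload
#     def get(self, key: str, default: str) -> str: ...
# yields a single signature in which `key` gets the union type int | str and `default`
# becomes optional, because it is missing from one of the overload items.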
def _verify_signature(
stub: Signature[nodes.Argument], runtime: Signature[inspect.Parameter], function_name: str
) -> Iterator[str]:
# Check positional arguments match up
for stub_arg, runtime_arg in zip(stub.pos, runtime.pos):
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
if (
runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY
and not stub_arg.variable.name.startswith("__")
and not stub_arg.variable.name.strip("_") == "self"
and not is_dunder(function_name, exclude_special=True) # noisy for dunder methods
):
yield (
'stub argument "{}" should be positional-only '
'(rename with a leading double underscore, i.e. "__{}")'.format(
stub_arg.variable.name, runtime_arg.name
)
)
if (
runtime_arg.kind != inspect.Parameter.POSITIONAL_ONLY
and stub_arg.variable.name.startswith("__")
):
yield (
'stub argument "{}" should be positional or keyword '
"(remove leading double underscore)".format(stub_arg.variable.name)
)
# Check unmatched positional args
if len(stub.pos) > len(runtime.pos):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *args. Hence, a) we can't check that the runtime actually takes those
# parameters and b) below, we don't enforce that the stub takes *args, since runtime logic
# may prevent those arguments from actually being accepted.
if runtime.varpos is None:
for stub_arg in stub.pos[len(runtime.pos):]:
# If the variable is in runtime.kwonly, it's just mislabelled as not a
# keyword-only argument
if stub_arg.variable.name not in runtime.kwonly:
yield 'runtime does not have argument "{}"'.format(stub_arg.variable.name)
else:
yield 'stub argument "{}" is not keyword-only'.format(stub_arg.variable.name)
if stub.varpos is not None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
elif len(stub.pos) < len(runtime.pos):
for runtime_arg in runtime.pos[len(stub.pos):]:
if runtime_arg.name not in stub.kwonly:
yield 'stub does not have argument "{}"'.format(runtime_arg.name)
else:
yield 'runtime argument "{}" is not keyword-only'.format(runtime_arg.name)
# Checks involving *args
if len(stub.pos) <= len(runtime.pos) or runtime.varpos is None:
if stub.varpos is None and runtime.varpos is not None:
yield 'stub does not have *args argument "{}"'.format(runtime.varpos.name)
if stub.varpos is not None and runtime.varpos is None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
# Check keyword-only args
for arg in sorted(set(stub.kwonly) & set(runtime.kwonly)):
stub_arg, runtime_arg = stub.kwonly[arg], runtime.kwonly[arg]
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
# Check unmatched keyword-only args
if runtime.varkw is None or not set(runtime.kwonly).issubset(set(stub.kwonly)):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *kwargs. Hence, a) we only check if the runtime actually takes those
# parameters when the above condition holds and b) below, we don't enforce that the stub
# takes *kwargs, since runtime logic may prevent additional arguments from actually being
# accepted.
for arg in sorted(set(stub.kwonly) - set(runtime.kwonly)):
yield 'runtime does not have argument "{}"'.format(arg)
for arg in sorted(set(runtime.kwonly) - set(stub.kwonly)):
if arg in set(stub_arg.variable.name for stub_arg in stub.pos):
# Don't report this if we've reported it before
if len(stub.pos) > len(runtime.pos) and runtime.varpos is not None:
yield 'stub argument "{}" is not keyword-only'.format(arg)
else:
yield 'stub does not have argument "{}"'.format(arg)
# Checks involving **kwargs
if stub.varkw is None and runtime.varkw is not None:
# As mentioned above, don't enforce that the stub takes **kwargs.
# Also check against positional parameters, to avoid a nitpicky message when an argument
# isn't marked as keyword-only
stub_pos_names = set(stub_arg.variable.name for stub_arg in stub.pos)
# Ideally we'd do a strict subset check, but in practice the errors from that aren't useful
if not set(runtime.kwonly).issubset(set(stub.kwonly) | stub_pos_names):
yield 'stub does not have **kwargs argument "{}"'.format(runtime.varkw.name)
if stub.varkw is not None and runtime.varkw is None:
yield 'runtime does not have **kwargs argument "{}"'.format(stub.varkw.variable.name)
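# Illustrative mismatch: if the stub declares `def f(x)` but the runtime signature is
# `def f(x, y)` with no *args, the checks above yield 'stub does not have argument "y"'.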
@verify.register(nodes.FuncItem)
def verify_funcitem(
stub: nodes.FuncItem, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except (ValueError, RuntimeError):
# inspect.signature throws sometimes
# catch RuntimeError because of https://bugs.python.org/issue39504
return
stub_sig = Signature.from_funcitem(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
runtime_desc="def " + str(signature),
)
@verify.register(Missing)
def verify_none(
stub: Missing, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
yield Error(object_path, "is not present in stub", stub, runtime)
@verify.register(nodes.Var)
def verify_var(
stub: nodes.Var, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
# Don't always yield an error here, because we often can't find instance variables
if len(object_path) <= 2:
yield Error(object_path, "is not present at runtime", stub, runtime)
return
runtime_type = get_mypy_type_of_runtime_value(runtime)
if (
runtime_type is not None
and stub.type is not None
and not is_subtype_helper(runtime_type, stub.type)
):
should_error = True
# Avoid errors when defining enums, since runtime_type is the enum itself, but we'd
# annotate it with the type of runtime.value
if isinstance(runtime, enum.Enum):
runtime_type = get_mypy_type_of_runtime_value(runtime.value)
if runtime_type is not None and is_subtype_helper(runtime_type, stub.type):
should_error = False
if should_error:
yield Error(
object_path,
"variable differs from runtime type {}".format(runtime_type),
stub,
runtime,
)
@verify.register(nodes.OverloadedFuncDef)
def verify_overloadedfuncdef(
stub: nodes.OverloadedFuncDef, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.is_property:
# We get here in cases of overloads from property.setter
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except ValueError:
return
stub_sig = Signature.from_overloadedfuncdef(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
# TODO: This is a little hacky, but the addition here is super useful
if "has a default value of type" in message:
message += (
". This is often caused by overloads failing to account for explicitly passing "
"in the default value."
)
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
stub_desc=str(stub.type) + "\nInferred signature: {}".format(stub_sig),
runtime_desc="def " + str(signature),
)
@verify.register(nodes.TypeVarExpr)
def verify_typevarexpr(
stub: nodes.TypeVarExpr, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if False:
yield None
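# The unreachable `yield` above exists only to turn this function into a generator, so it
# satisfies the Iterator[Error] interface while never actually reporting anything.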
def _verify_property(stub: nodes.Decorator, runtime: Any) -> Iterator[str]:
assert stub.func.is_property
if isinstance(runtime, property):
return
if inspect.isdatadescriptor(runtime):
# It's enough like a property...
return
# Sometimes attributes pretend to be properties, for instance, to express that they
# are read only. So allowlist if runtime_type matches the return type of stub.
runtime_type = get_mypy_type_of_runtime_value(runtime)
func_type = (
stub.func.type.ret_type if isinstance(stub.func.type, mypy.types.CallableType) else None
)
if (
runtime_type is not None
and func_type is not None
and is_subtype_helper(runtime_type, func_type)
):
return
yield "is inconsistent, cannot reconcile @property on stub with runtime object"
def _resolve_funcitem_from_decorator(dec: nodes.OverloadPart) -> Optional[nodes.FuncItem]:
"""Returns a FuncItem that corresponds to the output of the decorator.
Returns None if we can't figure out what that would be. For convenience, this function also
accepts FuncItems.
"""
if isinstance(dec, nodes.FuncItem):
return dec
if dec.func.is_property:
return None
def apply_decorator_to_funcitem(
decorator: nodes.Expression, func: nodes.FuncItem
) -> Optional[nodes.FuncItem]:
if not isinstance(decorator, nodes.RefExpr):
return None
if decorator.fullname is None:
# Happens with namedtuple
return None
if decorator.fullname in (
"builtins.staticmethod",
"typing.overload",
"abc.abstractmethod",
):
return func
if decorator.fullname == "builtins.classmethod":
assert func.arguments[0].variable.name in ("cls", "metacls")
ret = copy.copy(func)
# Remove the cls argument, since it's not present in inspect.signature of classmethods
ret.arguments = ret.arguments[1:]
return ret
# Just give up on any other decorators. After excluding properties, we don't run into
# anything else when running on typeshed's stdlib.
return None
func = dec.func # type: nodes.FuncItem
for decorator in dec.original_decorators:
resulting_func = apply_decorator_to_funcitem(decorator, func)
if resulting_func is None:
return None
func = resulting_func
return func
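# Illustrative example: a stub method decorated with @classmethod resolves to a copy of its
# FuncItem with the leading `cls` argument removed, so it can be compared against
# inspect.signature of the runtime classmethod (where `cls` is already bound). Properties
# and unrecognised decorators resolve to None.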
@verify.register(nodes.Decorator)
def verify_decorator(
stub: nodes.Decorator, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.func.is_property:
for message in _verify_property(stub, runtime):
yield Error(object_path, message, stub, runtime)
return
func = _resolve_funcitem_from_decorator(stub)
if func is not None:
yield from verify(func, runtime, object_path)
@verify.register(nodes.TypeAlias)
def verify_typealias(
stub: nodes.TypeAlias, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if False:
yield None
SPECIAL_DUNDERS = ("__init__", "__new__", "__call__", "__init_subclass__", "__class_getitem__")
def is_dunder(name: str, exclude_special: bool = False) -> bool:
"""Returns whether name is a dunder name.
:param exclude_special: Whether to return False for a couple special dunder methods.
"""
if exclude_special and name in SPECIAL_DUNDERS:
return False
return name.startswith("__") and name.endswith("__")
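# Examples: is_dunder("__len__") is True, is_dunder("_private") is False, and
# is_dunder("__init__", exclude_special=True) is False because __init__ is a special dunder.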
def is_subtype_helper(left: mypy.types.Type, right: mypy.types.Type) -> bool:
"""Checks whether ``left`` is a subtype of ``right``."""
left = mypy.types.get_proper_type(left)
right = mypy.types.get_proper_type(right)
if (
isinstance(left, mypy.types.LiteralType)
and isinstance(left.value, int)
and left.value in (0, 1)
and isinstance(right, mypy.types.Instance)
and right.type.fullname == "builtins.bool"
):
# Pretend Literal[0, 1] is a subtype of bool to avoid unhelpful errors.
return True
with mypy.state.strict_optional_set(True):
return mypy.subtypes.is_subtype(left, right)
def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]:
"""Returns a mypy type object representing the type of ``runtime``.
Returns None if we can't find something that works.
"""
if runtime is None:
return mypy.types.NoneType()
if isinstance(runtime, property):
# Give up on properties to avoid issues with things that are typed as attributes.
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(
runtime,
(types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
):
builtins = get_stub("builtins")
assert builtins is not None
type_info = builtins.names["function"].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
try:
signature = inspect.signature(runtime)
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append(
None if arg.kind == inspect.Parameter.POSITIONAL_ONLY else arg.name
)
has_default = arg.default == inspect.Parameter.empty
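# Note: despite its name, has_default is True when the parameter has *no* default
# (its default is Parameter.empty); the ARG_POS/ARG_OPT branches below are written
# against that inverted meaning, so the resulting argument kinds still come out right.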
if arg.kind == inspect.Parameter.POSITIONAL_ONLY:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.KEYWORD_ONLY:
arg_kinds.append(nodes.ARG_NAMED if has_default else nodes.ARG_NAMED_OPT)
elif arg.kind == inspect.Parameter.VAR_POSITIONAL:
arg_kinds.append(nodes.ARG_STAR)
elif arg.kind == inspect.Parameter.VAR_KEYWORD:
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
except ValueError:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(
arg_types,
arg_kinds,
arg_names,
ret_type=anytype(),
fallback=fallback,
is_ellipsis_args=True,
)
# Try and look up a stub for the runtime object
stub = get_stub(type(runtime).__module__)
if stub is None:
return None
type_name = type(runtime).__name__
if type_name not in stub.names:
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if not isinstance(type_info, nodes.TypeInfo):
return None
if isinstance(runtime, tuple):
# Special case tuples so we construct a valid mypy.types.TupleType
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if i is not None else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
try:
# Literals are supposed to be only bool, int, str, bytes or enums, but this seems to work
# well (when not using mypyc, for which bytes and enums are also problematic).
return mypy.types.LiteralType(
value=runtime,
fallback=fallback,
)
except TypeError:
# Ask for forgiveness if we're using mypyc.
return fallback
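# Illustrative results (assuming the builtins stub has been built): an int such as 3 maps to
# a LiteralType with fallback builtins.int, a tuple maps to a TupleType of its items, and a
# plain function maps to a CallableType whose argument and return types are all Any.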
_all_stubs = {} # type: Dict[str, nodes.MypyFile]
# MASKED: build_stubs function (lines 1005-1060)
def get_stub(module: str) -> Optional[nodes.MypyFile]:
"""Returns a stub object for the given module, if we've built one."""
return _all_stubs.get(module)
def get_typeshed_stdlib_modules(custom_typeshed_dir: Optional[str]) -> List[str]:
"""Returns a list of stdlib modules in typeshed (for current Python version)."""
stdlib_py_versions = mypy.modulefinder.load_stdlib_py_versions(custom_typeshed_dir)
packages = set()
# Typeshed doesn't cover Python 3.5.
if sys.version_info < (3, 6):
version_info = (3, 6)
else:
version_info = sys.version_info[0:2]
for module, versions in stdlib_py_versions.items():
minver, maxver = versions
if version_info >= minver and (maxver is None or version_info <= maxver):
packages.add(module)
if custom_typeshed_dir:
typeshed_dir = Path(custom_typeshed_dir)
else:
typeshed_dir = Path(mypy.build.default_data_dir()) / "typeshed"
stdlib_dir = typeshed_dir / "stdlib"
modules = []
for path in stdlib_dir.rglob("*.pyi"):
if path.stem == "__init__":
path = path.parent
module = ".".join(path.relative_to(stdlib_dir).parts[:-1] + (path.stem,))
if module.split(".")[0] in packages:
modules.append(module)
return sorted(modules)
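# Illustrative mapping: stdlib/os/path.pyi becomes "os.path" and stdlib/asyncio/__init__.pyi
# becomes "asyncio"; only modules whose top-level package passes the version filter are kept.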
def get_allowlist_entries(allowlist_file: str) -> Iterator[str]:
def strip_comments(s: str) -> str:
try:
return s[: s.index("#")].strip()
except ValueError:
return s.strip()
with open(allowlist_file) as f:
for line in f.readlines():
entry = strip_comments(line)
if entry:
yield entry
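# Illustrative allowlist file contents: each line holds one Error.object_desc, e.g.
#     os.path.splitext  # platform dependent
# Trailing comments are stripped and blank lines are ignored.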
def test_stubs(args: argparse.Namespace, use_builtins_fixtures: bool = False) -> int:
"""This is stubtest! It's time to test the stubs!"""
# Load the allowlist. This is a series of strings corresponding to Error.object_desc
# Values in the dict will store whether we used the allowlist entry or not.
allowlist = {
entry: False
for allowlist_file in args.allowlist
for entry in get_allowlist_entries(allowlist_file)
}
allowlist_regexes = {entry: re.compile(entry) for entry in allowlist}
# If we need to generate an allowlist, we store Error.object_desc for each error here.
generated_allowlist = set()
modules = args.modules
if args.check_typeshed:
assert not args.modules, "Cannot pass both --check-typeshed and a list of modules"
modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir)
annoying_modules = {"antigravity", "this"}
modules = [m for m in modules if m not in annoying_modules]
assert modules, "No modules to check"
options = Options()
options.incremental = False
options.custom_typeshed_dir = args.custom_typeshed_dir
options.config_file = args.mypy_config_file
options.use_builtins_fixtures = use_builtins_fixtures
if options.config_file:
def set_strict_flags() -> None: # not needed yet
return
parse_config_file(options, set_strict_flags, options.config_file, sys.stdout, sys.stderr)
try:
modules = build_stubs(modules, options, find_submodules=not args.check_typeshed)
except RuntimeError:
return 1
exit_code = 0
for module in modules:
for error in test_module(module):
# Filter errors
if args.ignore_missing_stub and error.is_missing_stub():
continue
if args.ignore_positional_only and error.is_positional_only_related():
continue
if error.object_desc in allowlist:
allowlist[error.object_desc] = True
continue
is_allowlisted = False
for w in allowlist:
if allowlist_regexes[w].fullmatch(error.object_desc):
allowlist[w] = True
is_allowlisted = True
break
if is_allowlisted:
continue
# We have errors, so change exit code, and output whatever necessary
exit_code = 1
if args.generate_allowlist:
generated_allowlist.add(error.object_desc)
continue
print(error.get_description(concise=args.concise))
# Print unused allowlist entries
if not args.ignore_unused_allowlist:
for w in allowlist:
# Don't consider an entry unused if it regex-matches the empty string
# This lets us allowlist errors that don't manifest at all on some systems
if not allowlist[w] and not allowlist_regexes[w].fullmatch(""):
exit_code = 1
print("note: unused allowlist entry {}".format(w))
# Print the generated allowlist
if args.generate_allowlist:
for e in sorted(generated_allowlist):
print(e)
exit_code = 0
return exit_code
def parse_options(args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Compares stubs to objects introspected from the runtime."
)
parser.add_argument("modules", nargs="*", help="Modules to test")
parser.add_argument("--concise", action="store_true", help="Make output concise")
parser.add_argument(
"--ignore-missing-stub",
action="store_true",
help="Ignore errors for stub missing things that are present at runtime",
)
parser.add_argument(
"--ignore-positional-only",
action="store_true",
help="Ignore errors for whether an argument should or shouldn't be positional-only",
)
parser.add_argument(
"--custom-typeshed-dir", metavar="DIR", help="Use the custom typeshed in DIR"
)
parser.add_argument(
"--check-typeshed", action="store_true", help="Check all stdlib modules in typeshed"
)
parser.add_argument(
"--allowlist",
"--whitelist",
action="append",
metavar="FILE",
default=[],
help=(
"Use file as an allowlist. Can be passed multiple times to combine multiple "
"allowlists. Allowlists can be created with --generate-allowlist"
),
)
parser.add_argument(
"--generate-allowlist",
"--generate-whitelist",
action="store_true",
help="Print an allowlist (to stdout) to be used with --allowlist",
)
parser.add_argument(
"--ignore-unused-allowlist",
"--ignore-unused-whitelist",
action="store_true",
help="Ignore unused allowlist entries",
)
config_group = parser.add_argument_group(
title='mypy config file',
description="Use a config file instead of command line arguments. "
"Plugins and mypy path are the only supported "
"configurations.",
)
config_group.add_argument(
'--mypy-config-file',
help=(
"An existing mypy configuration file, currently used by stubtest to help "
"determine mypy path and plugins"
),
)
return parser.parse_args(args)
def main() -> int:
mypy.util.check_python_version("stubtest")
return test_stubs(parse_options(sys.argv[1:]))
if __name__ == "__main__":
sys.exit(main())
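# Typical invocation (illustrative module and file names):
#     python -m mypy.stubtest somepackage --allowlist allow.txt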
|
def build_stubs(modules: List[str], options: Options, find_submodules: bool = False) -> List[str]:
"""Uses mypy to construct stub objects for the given modules.
This sets global state that ``get_stub`` can access.
Returns all modules we might want to check. If ``find_submodules`` is False, this is equal
to ``modules``.
:param modules: List of modules to build stubs for.
:param options: Mypy options for finding and building stubs.
:param find_submodules: Whether to attempt to find submodules of the given modules as well.
"""
data_dir = mypy.build.default_data_dir()
search_path = mypy.modulefinder.compute_search_paths([], options, data_dir)
find_module_cache = mypy.modulefinder.FindModuleCache(
search_path, fscache=None, options=options
)
all_modules = []
sources = []
for module in modules:
all_modules.append(module)
if not find_submodules:
module_path = find_module_cache.find_module(module)
if not isinstance(module_path, str):
# test_module will yield an error later when it can't find stubs
continue
sources.append(mypy.modulefinder.BuildSource(module_path, module, None))
else:
found_sources = find_module_cache.find_modules_recursive(module)
sources.extend(found_sources)
all_modules.extend(s.module for s in found_sources if s.module not in all_modules)
try:
res = mypy.build.build(sources=sources, options=options)
except mypy.errors.CompileError as e:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to failed mypy compile:\n",
str(e),
]
print("".join(output))
raise RuntimeError from e
if res.errors:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to mypy build errors:\n",
]
print("".join(output) + "\n".join(res.errors))
raise RuntimeError
global _all_stubs
_all_stubs = res.files
return all_modules
| 1,005 | 1,060 |
"""Tests for stubs.
Verify that various things in stubs are consistent with how things behave at runtime.
"""
import argparse
import copy
import enum
import importlib
import inspect
import re
import sys
import types
import warnings
from functools import singledispatch
from pathlib import Path
from typing import Any, Dict, Generic, Iterator, List, Optional, Tuple, TypeVar, Union, cast
from typing_extensions import Type
import mypy.build
import mypy.modulefinder
import mypy.types
from mypy import nodes
from mypy.config_parser import parse_config_file
from mypy.options import Options
from mypy.util import FancyFormatter
class Missing:
"""Marker object for things that are missing (from a stub or the runtime)."""
def __repr__(self) -> str:
return "MISSING"
MISSING = Missing()
T = TypeVar("T")
if sys.version_info >= (3, 5, 3):
MaybeMissing = Union[T, Missing]
else:
# work around a bug in 3.5.2 and earlier's typing.py
class MaybeMissingMeta(type):
def __getitem__(self, arg: Any) -> Any:
return Union[arg, Missing]
class MaybeMissing(metaclass=MaybeMissingMeta): # type: ignore
pass
_formatter = FancyFormatter(sys.stdout, sys.stderr, False)
def _style(message: str, **kwargs: Any) -> str:
"""Wrapper around mypy.util for fancy formatting."""
kwargs.setdefault("color", "none")
return _formatter.style(message, **kwargs)
class Error:
def __init__(
self,
object_path: List[str],
message: str,
stub_object: MaybeMissing[nodes.Node],
runtime_object: MaybeMissing[Any],
*,
stub_desc: Optional[str] = None,
runtime_desc: Optional[str] = None
) -> None:
"""Represents an error found by stubtest.
:param object_path: Location of the object with the error,
e.g. ``["module", "Class", "method"]``
:param message: Error message
:param stub_object: The mypy node representing the stub
:param runtime_object: Actual object obtained from the runtime
:param stub_desc: Specialised description for the stub object, should you wish
:param runtime_desc: Specialised description for the runtime object, should you wish
"""
self.object_desc = ".".join(object_path)
self.message = message
self.stub_object = stub_object
self.runtime_object = runtime_object
self.stub_desc = stub_desc or str(getattr(stub_object, "type", stub_object))
self.runtime_desc = runtime_desc or str(runtime_object)
def is_missing_stub(self) -> bool:
"""Whether or not the error is for something missing from the stub."""
return isinstance(self.stub_object, Missing)
def is_positional_only_related(self) -> bool:
"""Whether or not the error is for something being (or not being) positional-only."""
# TODO: This is hacky, use error codes or something more resilient
return "leading double underscore" in self.message
def get_description(self, concise: bool = False) -> str:
"""Returns a description of the error.
:param concise: Whether to return a concise, one-line description
"""
if concise:
return _style(self.object_desc, bold=True) + " " + self.message
stub_line = None
stub_file = None # type: None
if not isinstance(self.stub_object, Missing):
stub_line = self.stub_object.line
# TODO: Find a way of getting the stub file
stub_loc_str = ""
if stub_line:
stub_loc_str += " at line {}".format(stub_line)
if stub_file:
stub_loc_str += " in file {}".format(Path(stub_file))
runtime_line = None
runtime_file = None
if not isinstance(self.runtime_object, Missing):
try:
runtime_line = inspect.getsourcelines(self.runtime_object)[1]
except (OSError, TypeError):
pass
try:
runtime_file = inspect.getsourcefile(self.runtime_object)
except TypeError:
pass
runtime_loc_str = ""
if runtime_line:
runtime_loc_str += " at line {}".format(runtime_line)
if runtime_file:
runtime_loc_str += " in file {}".format(Path(runtime_file))
output = [
_style("error: ", color="red", bold=True),
_style(self.object_desc, bold=True),
" ",
self.message,
"\n",
"Stub:",
_style(stub_loc_str, dim=True),
"\n",
_style(self.stub_desc + "\n", color="blue", dim=True),
"Runtime:",
_style(runtime_loc_str, dim=True),
"\n",
_style(self.runtime_desc + "\n", color="blue", dim=True),
]
return "".join(output)
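# A non-concise description is rendered roughly as:
#     error: module.Class.method <message>
#     Stub: at line 12
#     <stub description>
#     Runtime: at line 34 in file <path>
#     <runtime description>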
def test_module(module_name: str) -> Iterator[Error]:
"""Tests a given module's stub against introspecting it at runtime.
Requires the stub to have been built already, accomplished by a call to ``build_stubs``.
:param module_name: The module to test
"""
stub = get_stub(module_name)
if stub is None:
yield Error([module_name], "failed to find stubs", MISSING, None)
return
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
runtime = importlib.import_module(module_name)
except Exception as e:
yield Error([module_name], "failed to import: {}".format(e), stub, MISSING)
return
with warnings.catch_warnings():
warnings.simplefilter("ignore")
yield from verify(stub, runtime, [module_name])
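# Illustrative use, assuming build_stubs() has already populated the stub cache:
#     for err in test_module("textwrap"):
#         print(err.get_description(concise=True))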
@singledispatch
def verify(
stub: nodes.Node, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
"""Entry point for comparing a stub to a runtime object.
We use single dispatch based on the type of ``stub``.
:param stub: The mypy node representing a part of the stub
:param runtime: The runtime object corresponding to ``stub``
"""
yield Error(object_path, "is an unknown mypy node", stub, runtime)
@verify.register(nodes.MypyFile)
def verify_mypyfile(
stub: nodes.MypyFile, runtime: MaybeMissing[types.ModuleType], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if not isinstance(runtime, types.ModuleType):
yield Error(object_path, "is not a module", stub, runtime)
return
# Check things in the stub that are public
to_check = set(
m
for m, o in stub.names.items()
if o.module_public and (not m.startswith("_") or hasattr(runtime, m))
)
runtime_public_contents = [
m
for m in dir(runtime)
if not m.startswith("_")
# Ensure that the object's module is `runtime`, e.g. so that we don't pick up reexported
# modules and infinitely recurse. Unfortunately, there's no way to detect an explicit
# reexport missing from the stubs (that isn't specified in __all__)
and getattr(getattr(runtime, m), "__module__", None) == runtime.__name__
]
# Check all things declared in module's __all__, falling back to runtime_public_contents
to_check.update(getattr(runtime, "__all__", runtime_public_contents))
to_check.difference_update({"__file__", "__doc__", "__name__", "__builtins__", "__package__"})
for entry in sorted(to_check):
yield from verify(
stub.names[entry].node if entry in stub.names else MISSING,
getattr(runtime, entry, MISSING),
object_path + [entry],
)
@verify.register(nodes.TypeInfo)
def verify_typeinfo(
stub: nodes.TypeInfo, runtime: MaybeMissing[Type[Any]], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime, stub_desc=repr(stub))
return
if not isinstance(runtime, type):
yield Error(object_path, "is not a type", stub, runtime, stub_desc=repr(stub))
return
# Check everything already defined in the stub
to_check = set(stub.names)
# There's a reasonable case to be made that we should always check all dunders, but it's
# currently quite noisy. We could turn this into a denylist instead of an allowlist.
to_check.update(
# cast to workaround mypyc complaints
m for m in cast(Any, vars)(runtime) if not m.startswith("_") or m in SPECIAL_DUNDERS
)
for entry in sorted(to_check):
mangled_entry = entry
if entry.startswith("__") and not entry.endswith("__"):
mangled_entry = "_{}{}".format(stub.name, entry)
yield from verify(
next((t.names[entry].node for t in stub.mro if entry in t.names), MISSING),
getattr(runtime, mangled_entry, MISSING),
object_path + [entry],
)
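# Name-mangling example: a private attribute "__token" declared on class Foo in the stub is
# looked up on the runtime class as "_Foo__token", mirroring Python's own mangling rules.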
def _verify_static_class_methods(
stub: nodes.FuncBase, runtime: Any, object_path: List[str]
) -> Iterator[str]:
if stub.name in ("__new__", "__init_subclass__", "__class_getitem__"):
# Special cased by Python, so don't bother checking
return
if inspect.isbuiltin(runtime):
# The isinstance checks don't work reliably for builtins, e.g. datetime.datetime.now, so do
# something a little hacky that seems to work well
probably_class_method = isinstance(getattr(runtime, "__self__", None), type)
if probably_class_method and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not probably_class_method and stub.is_class:
yield "stub is a classmethod but runtime is not"
return
# Look the object up statically, to avoid binding by the descriptor protocol
static_runtime = importlib.import_module(object_path[0])
for entry in object_path[1:]:
try:
static_runtime = inspect.getattr_static(static_runtime, entry)
except AttributeError:
# This can happen with mangled names, ignore for now.
# TODO: pass more information about ancestors of nodes/objects to verify, so we don't
# have to do this hacky lookup. Would be useful in a couple other places too.
return
if isinstance(static_runtime, classmethod) and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not isinstance(static_runtime, classmethod) and stub.is_class:
yield "stub is a classmethod but runtime is not"
if isinstance(static_runtime, staticmethod) and not stub.is_static:
yield "runtime is a staticmethod but stub is not"
if not isinstance(static_runtime, staticmethod) and stub.is_static:
yield "stub is a staticmethod but runtime is not"
def _verify_arg_name(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter, function_name: str
) -> Iterator[str]:
"""Checks whether argument names match."""
# Ignore exact names for most dunder methods
if is_dunder(function_name, exclude_special=True):
return
def strip_prefix(s: str, prefix: str) -> str:
return s[len(prefix):] if s.startswith(prefix) else s
if strip_prefix(stub_arg.variable.name, "__") == runtime_arg.name:
return
def names_approx_match(a: str, b: str) -> bool:
a = a.strip("_")
b = b.strip("_")
return a.startswith(b) or b.startswith(a) or len(a) == 1 or len(b) == 1
# Be more permissive about names matching for positional-only arguments
if runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY and names_approx_match(
stub_arg.variable.name, runtime_arg.name
):
return
# This comes up with namedtuples, so ignore
if stub_arg.variable.name == "_self":
return
yield (
'stub argument "{}" differs from runtime argument "{}"'.format(
stub_arg.variable.name, runtime_arg.name
)
)
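# Illustrative outcomes: a stub parameter "__x" matches a runtime parameter "x" (the
# positional-only double-underscore prefix is stripped), while stub "path" vs runtime
# "filename" on an ordinary parameter is reported as a name mismatch.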
def _verify_arg_default_value(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter
) -> Iterator[str]:
"""Checks whether argument default values are compatible."""
if runtime_arg.default != inspect.Parameter.empty:
if stub_arg.kind not in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'runtime argument "{}" has a default value but stub argument does not'.format(
runtime_arg.name
)
)
else:
runtime_type = get_mypy_type_of_runtime_value(runtime_arg.default)
# Fallback to the type annotation type if var type is missing. The type annotation
# is an UnboundType, but I don't know enough to know what the pros and cons here are.
# UnboundTypes have ugly question marks following them, so default to var type.
# Note we do this same fallback when constructing signatures in from_overloadedfuncdef
stub_type = stub_arg.variable.type or stub_arg.type_annotation
if isinstance(stub_type, mypy.types.TypeVarType):
stub_type = stub_type.upper_bound
if (
runtime_type is not None
and stub_type is not None
# Avoid false positives for marker objects
and type(runtime_arg.default) != object
and not is_subtype_helper(runtime_type, stub_type)
):
yield (
'runtime argument "{}" has a default value of type {}, '
"which is incompatible with stub argument type {}".format(
runtime_arg.name, runtime_type, stub_type
)
)
else:
if stub_arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'stub argument "{}" has a default value but runtime argument does not'.format(
stub_arg.variable.name
)
)
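# Illustrative mismatch: if the runtime is `def f(timeout=1.0)` but the stub annotates
# `timeout: int = ...`, the default's runtime type (a float) is not a subtype of the stub's
# declared int, so an incompatible-default error is yielded.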
def maybe_strip_cls(name: str, args: List[nodes.Argument]) -> List[nodes.Argument]:
if name in ("__init_subclass__", "__class_getitem__"):
# These are implicitly classmethods. If the stub chooses not to have @classmethod, we
# should remove the cls argument
if args[0].variable.name == "cls":
return args[1:]
return args
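# Example: a stub that writes `def __class_getitem__(cls, item): ...` without @classmethod
# has the implicit `cls` dropped here, so it lines up with inspect.signature at runtime.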
class Signature(Generic[T]):
def __init__(self) -> None:
self.pos = [] # type: List[T]
self.kwonly = {} # type: Dict[str, T]
self.varpos = None # type: Optional[T]
self.varkw = None # type: Optional[T]
def __str__(self) -> str:
def get_name(arg: Any) -> str:
if isinstance(arg, inspect.Parameter):
return arg.name
if isinstance(arg, nodes.Argument):
return arg.variable.name
raise AssertionError
def get_type(arg: Any) -> Optional[str]:
if isinstance(arg, inspect.Parameter):
return None
if isinstance(arg, nodes.Argument):
return str(arg.variable.type or arg.type_annotation)
raise AssertionError
def has_default(arg: Any) -> bool:
if isinstance(arg, inspect.Parameter):
return arg.default != inspect.Parameter.empty
if isinstance(arg, nodes.Argument):
return arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT)
raise AssertionError
def get_desc(arg: Any) -> str:
arg_type = get_type(arg)
return (
get_name(arg)
+ (": {}".format(arg_type) if arg_type else "")
+ (" = ..." if has_default(arg) else "")
)
kw_only = sorted(self.kwonly.values(), key=lambda a: (has_default(a), get_name(a)))
ret = "def ("
ret += ", ".join(
[get_desc(arg) for arg in self.pos]
+ (["*" + get_name(self.varpos)] if self.varpos else (["*"] if self.kwonly else []))
+ [get_desc(arg) for arg in kw_only]
+ (["**" + get_name(self.varkw)] if self.varkw else [])
)
ret += ")"
return ret
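# Rendering example (runtime side, so no annotations): a signature like
# `def f(a, b=1, *, c, d=2, **kw)` is formatted roughly as
# "def (a, b = ..., *, c, d = ..., **kw)"; stub-side arguments are rendered with their
# recorded types as well.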
@staticmethod
def from_funcitem(stub: nodes.FuncItem) -> "Signature[nodes.Argument]":
stub_sig = Signature() # type: Signature[nodes.Argument]
stub_args = maybe_strip_cls(stub.name, stub.arguments)
for stub_arg in stub_args:
if stub_arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
stub_sig.pos.append(stub_arg)
elif stub_arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
stub_sig.kwonly[stub_arg.variable.name] = stub_arg
elif stub_arg.kind == nodes.ARG_STAR:
stub_sig.varpos = stub_arg
elif stub_arg.kind == nodes.ARG_STAR2:
stub_sig.varkw = stub_arg
else:
raise AssertionError
return stub_sig
@staticmethod
def from_inspect_signature(signature: inspect.Signature) -> "Signature[inspect.Parameter]":
runtime_sig = Signature() # type: Signature[inspect.Parameter]
for runtime_arg in signature.parameters.values():
if runtime_arg.kind in (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
):
runtime_sig.pos.append(runtime_arg)
elif runtime_arg.kind == inspect.Parameter.KEYWORD_ONLY:
runtime_sig.kwonly[runtime_arg.name] = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_POSITIONAL:
runtime_sig.varpos = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_KEYWORD:
runtime_sig.varkw = runtime_arg
else:
raise AssertionError
return runtime_sig
@staticmethod
def from_overloadedfuncdef(stub: nodes.OverloadedFuncDef) -> "Signature[nodes.Argument]":
"""Returns a Signature from an OverloadedFuncDef.
If life were simple, to verify_overloadedfuncdef, we'd just verify_funcitem for each of its
items. Unfortunately, life isn't simple and overloads are pretty deceitful. So instead, we
try and combine the overload's items into a single signature that is compatible with any
lies it might try to tell.
"""
# For most dunder methods, just assume all args are positional-only
assume_positional_only = is_dunder(stub.name, exclude_special=True)
all_args = {} # type: Dict[str, List[Tuple[nodes.Argument, int]]]
for func in map(_resolve_funcitem_from_decorator, stub.items):
assert func is not None
args = maybe_strip_cls(stub.name, func.arguments)
for index, arg in enumerate(args):
# For positional-only args, we allow overloads to have different names for the same
# argument. To accomplish this, we just make up a fake index-based name.
name = (
"__{}".format(index)
if arg.variable.name.startswith("__") or assume_positional_only
else arg.variable.name
)
all_args.setdefault(name, []).append((arg, index))
def get_position(arg_name: str) -> int:
# We just need this to return the positional args in the correct order.
return max(index for _, index in all_args[arg_name])
def get_type(arg_name: str) -> mypy.types.ProperType:
with mypy.state.strict_optional_set(True):
all_types = [
arg.variable.type or arg.type_annotation for arg, _ in all_args[arg_name]
]
return mypy.typeops.make_simplified_union([t for t in all_types if t])
def get_kind(arg_name: str) -> int:
kinds = {arg.kind for arg, _ in all_args[arg_name]}
if nodes.ARG_STAR in kinds:
return nodes.ARG_STAR
if nodes.ARG_STAR2 in kinds:
return nodes.ARG_STAR2
# The logic here is based on two tenets:
# 1) If an arg is ever optional (or unspecified), it is optional
# 2) If an arg is ever positional, it is positional
is_opt = (
len(all_args[arg_name]) < len(stub.items)
or nodes.ARG_OPT in kinds
or nodes.ARG_NAMED_OPT in kinds
)
is_pos = nodes.ARG_OPT in kinds or nodes.ARG_POS in kinds
if is_opt:
return nodes.ARG_OPT if is_pos else nodes.ARG_NAMED_OPT
return nodes.ARG_POS if is_pos else nodes.ARG_NAMED
sig = Signature() # type: Signature[nodes.Argument]
for arg_name in sorted(all_args, key=get_position):
# example_arg_name gives us a real name (in case we had a fake index-based name)
example_arg_name = all_args[arg_name][0][0].variable.name
arg = nodes.Argument(
nodes.Var(example_arg_name, get_type(arg_name)),
type_annotation=None,
initializer=None,
kind=get_kind(arg_name),
)
if arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
sig.pos.append(arg)
elif arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
sig.kwonly[arg.variable.name] = arg
elif arg.kind == nodes.ARG_STAR:
sig.varpos = arg
elif arg.kind == nodes.ARG_STAR2:
sig.varkw = arg
else:
raise AssertionError
return sig
def _verify_signature(
stub: Signature[nodes.Argument], runtime: Signature[inspect.Parameter], function_name: str
) -> Iterator[str]:
# Check positional arguments match up
for stub_arg, runtime_arg in zip(stub.pos, runtime.pos):
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
if (
runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY
and not stub_arg.variable.name.startswith("__")
and not stub_arg.variable.name.strip("_") == "self"
and not is_dunder(function_name, exclude_special=True) # noisy for dunder methods
):
yield (
'stub argument "{}" should be positional-only '
'(rename with a leading double underscore, i.e. "__{}")'.format(
stub_arg.variable.name, runtime_arg.name
)
)
if (
runtime_arg.kind != inspect.Parameter.POSITIONAL_ONLY
and stub_arg.variable.name.startswith("__")
):
yield (
'stub argument "{}" should be positional or keyword '
"(remove leading double underscore)".format(stub_arg.variable.name)
)
# Check unmatched positional args
if len(stub.pos) > len(runtime.pos):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *args. Hence, a) we can't check that the runtime actually takes those
# parameters and b) below, we don't enforce that the stub takes *args, since runtime logic
# may prevent those arguments from actually being accepted.
if runtime.varpos is None:
for stub_arg in stub.pos[len(runtime.pos):]:
# If the variable is in runtime.kwonly, it's just mislabelled as not a
# keyword-only argument
if stub_arg.variable.name not in runtime.kwonly:
yield 'runtime does not have argument "{}"'.format(stub_arg.variable.name)
else:
yield 'stub argument "{}" is not keyword-only'.format(stub_arg.variable.name)
if stub.varpos is not None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
elif len(stub.pos) < len(runtime.pos):
for runtime_arg in runtime.pos[len(stub.pos):]:
if runtime_arg.name not in stub.kwonly:
yield 'stub does not have argument "{}"'.format(runtime_arg.name)
else:
yield 'runtime argument "{}" is not keyword-only'.format(runtime_arg.name)
# Checks involving *args
if len(stub.pos) <= len(runtime.pos) or runtime.varpos is None:
if stub.varpos is None and runtime.varpos is not None:
yield 'stub does not have *args argument "{}"'.format(runtime.varpos.name)
if stub.varpos is not None and runtime.varpos is None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
# Check keyword-only args
for arg in sorted(set(stub.kwonly) & set(runtime.kwonly)):
stub_arg, runtime_arg = stub.kwonly[arg], runtime.kwonly[arg]
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
# Check unmatched keyword-only args
if runtime.varkw is None or not set(runtime.kwonly).issubset(set(stub.kwonly)):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *kwargs. Hence, a) we only check if the runtime actually takes those
# parameters when the above condition holds and b) below, we don't enforce that the stub
# takes *kwargs, since runtime logic may prevent additional arguments from actually being
# accepted.
for arg in sorted(set(stub.kwonly) - set(runtime.kwonly)):
yield 'runtime does not have argument "{}"'.format(arg)
for arg in sorted(set(runtime.kwonly) - set(stub.kwonly)):
if arg in set(stub_arg.variable.name for stub_arg in stub.pos):
# Don't report this if we've reported it before
if len(stub.pos) > len(runtime.pos) and runtime.varpos is not None:
yield 'stub argument "{}" is not keyword-only'.format(arg)
else:
yield 'stub does not have argument "{}"'.format(arg)
# Checks involving **kwargs
if stub.varkw is None and runtime.varkw is not None:
# As mentioned above, don't enforce that the stub takes **kwargs.
# Also check against positional parameters, to avoid a nitpicky message when an argument
# isn't marked as keyword-only
stub_pos_names = set(stub_arg.variable.name for stub_arg in stub.pos)
# Ideally we'd do a strict subset check, but in practice the errors from that aren't useful
if not set(runtime.kwonly).issubset(set(stub.kwonly) | stub_pos_names):
yield 'stub does not have **kwargs argument "{}"'.format(runtime.varkw.name)
if stub.varkw is not None and runtime.varkw is None:
yield 'runtime does not have **kwargs argument "{}"'.format(stub.varkw.variable.name)
@verify.register(nodes.FuncItem)
def verify_funcitem(
stub: nodes.FuncItem, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except (ValueError, RuntimeError):
# inspect.signature throws sometimes
# catch RuntimeError because of https://bugs.python.org/issue39504
return
stub_sig = Signature.from_funcitem(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
runtime_desc="def " + str(signature),
)
@verify.register(Missing)
def verify_none(
stub: Missing, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
yield Error(object_path, "is not present in stub", stub, runtime)
@verify.register(nodes.Var)
def verify_var(
stub: nodes.Var, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
# Don't always yield an error here, because we often can't find instance variables
if len(object_path) <= 2:
yield Error(object_path, "is not present at runtime", stub, runtime)
return
runtime_type = get_mypy_type_of_runtime_value(runtime)
if (
runtime_type is not None
and stub.type is not None
and not is_subtype_helper(runtime_type, stub.type)
):
should_error = True
# Avoid errors when defining enums, since runtime_type is the enum itself, but we'd
# annotate it with the type of runtime.value
if isinstance(runtime, enum.Enum):
runtime_type = get_mypy_type_of_runtime_value(runtime.value)
if runtime_type is not None and is_subtype_helper(runtime_type, stub.type):
should_error = False
if should_error:
yield Error(
object_path,
"variable differs from runtime type {}".format(runtime_type),
stub,
runtime,
)
@verify.register(nodes.OverloadedFuncDef)
def verify_overloadedfuncdef(
stub: nodes.OverloadedFuncDef, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.is_property:
# We get here in cases of overloads from property.setter
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except ValueError:
return
stub_sig = Signature.from_overloadedfuncdef(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
# TODO: This is a little hacky, but the addition here is super useful
if "has a default value of type" in message:
message += (
". This is often caused by overloads failing to account for explicitly passing "
"in the default value."
)
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
stub_desc=str(stub.type) + "\nInferred signature: {}".format(stub_sig),
runtime_desc="def " + str(signature),
)
@verify.register(nodes.TypeVarExpr)
def verify_typevarexpr(
stub: nodes.TypeVarExpr, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if False:
yield None
def _verify_property(stub: nodes.Decorator, runtime: Any) -> Iterator[str]:
assert stub.func.is_property
if isinstance(runtime, property):
return
if inspect.isdatadescriptor(runtime):
# It's enough like a property...
return
# Sometimes attributes pretend to be properties, for instance, to express that they
# are read only. So allowlist if runtime_type matches the return type of stub.
runtime_type = get_mypy_type_of_runtime_value(runtime)
func_type = (
stub.func.type.ret_type if isinstance(stub.func.type, mypy.types.CallableType) else None
)
if (
runtime_type is not None
and func_type is not None
and is_subtype_helper(runtime_type, func_type)
):
return
yield "is inconsistent, cannot reconcile @property on stub with runtime object"
def _resolve_funcitem_from_decorator(dec: nodes.OverloadPart) -> Optional[nodes.FuncItem]:
"""Returns a FuncItem that corresponds to the output of the decorator.
Returns None if we can't figure out what that would be. For convenience, this function also
accepts FuncItems.
"""
if isinstance(dec, nodes.FuncItem):
return dec
if dec.func.is_property:
return None
def apply_decorator_to_funcitem(
decorator: nodes.Expression, func: nodes.FuncItem
) -> Optional[nodes.FuncItem]:
if not isinstance(decorator, nodes.RefExpr):
return None
if decorator.fullname is None:
# Happens with namedtuple
return None
if decorator.fullname in (
"builtins.staticmethod",
"typing.overload",
"abc.abstractmethod",
):
return func
if decorator.fullname == "builtins.classmethod":
assert func.arguments[0].variable.name in ("cls", "metacls")
ret = copy.copy(func)
# Remove the cls argument, since it's not present in inspect.signature of classmethods
ret.arguments = ret.arguments[1:]
return ret
# Just give up on any other decorators. After excluding properties, we don't run into
# anything else when running on typeshed's stdlib.
return None
func = dec.func # type: nodes.FuncItem
for decorator in dec.original_decorators:
resulting_func = apply_decorator_to_funcitem(decorator, func)
if resulting_func is None:
return None
func = resulting_func
return func
@verify.register(nodes.Decorator)
def verify_decorator(
stub: nodes.Decorator, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.func.is_property:
for message in _verify_property(stub, runtime):
yield Error(object_path, message, stub, runtime)
return
func = _resolve_funcitem_from_decorator(stub)
if func is not None:
yield from verify(func, runtime, object_path)
@verify.register(nodes.TypeAlias)
def verify_typealias(
stub: nodes.TypeAlias, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if False:
yield None
SPECIAL_DUNDERS = ("__init__", "__new__", "__call__", "__init_subclass__", "__class_getitem__")
def is_dunder(name: str, exclude_special: bool = False) -> bool:
"""Returns whether name is a dunder name.
:param exclude_special: Whether to return False for a couple special dunder methods.
"""
if exclude_special and name in SPECIAL_DUNDERS:
return False
return name.startswith("__") and name.endswith("__")
def is_subtype_helper(left: mypy.types.Type, right: mypy.types.Type) -> bool:
"""Checks whether ``left`` is a subtype of ``right``."""
left = mypy.types.get_proper_type(left)
right = mypy.types.get_proper_type(right)
if (
isinstance(left, mypy.types.LiteralType)
and isinstance(left.value, int)
and left.value in (0, 1)
and isinstance(right, mypy.types.Instance)
and right.type.fullname == "builtins.bool"
):
# Pretend Literal[0, 1] is a subtype of bool to avoid unhelpful errors.
return True
with mypy.state.strict_optional_set(True):
return mypy.subtypes.is_subtype(left, right)
def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]:
"""Returns a mypy type object representing the type of ``runtime``.
Returns None if we can't find something that works.
"""
if runtime is None:
return mypy.types.NoneType()
if isinstance(runtime, property):
# Give up on properties to avoid issues with things that are typed as attributes.
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(
runtime,
(types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
):
builtins = get_stub("builtins")
assert builtins is not None
type_info = builtins.names["function"].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
try:
signature = inspect.signature(runtime)
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append(
None if arg.kind == inspect.Parameter.POSITIONAL_ONLY else arg.name
)
has_default = arg.default == inspect.Parameter.empty
if arg.kind == inspect.Parameter.POSITIONAL_ONLY:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.KEYWORD_ONLY:
arg_kinds.append(nodes.ARG_NAMED if has_default else nodes.ARG_NAMED_OPT)
elif arg.kind == inspect.Parameter.VAR_POSITIONAL:
arg_kinds.append(nodes.ARG_STAR)
elif arg.kind == inspect.Parameter.VAR_KEYWORD:
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
except ValueError:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(
arg_types,
arg_kinds,
arg_names,
ret_type=anytype(),
fallback=fallback,
is_ellipsis_args=True,
)
# Try and look up a stub for the runtime object
stub = get_stub(type(runtime).__module__)
if stub is None:
return None
type_name = type(runtime).__name__
if type_name not in stub.names:
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if not isinstance(type_info, nodes.TypeInfo):
return None
if isinstance(runtime, tuple):
# Special case tuples so we construct a valid mypy.types.TupleType
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if i is not None else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
try:
# Literals are supposed to be only bool, int, str, bytes or enums, but this seems to work
# well (when not using mypyc, for which bytes and enums are also problematic).
return mypy.types.LiteralType(
value=runtime,
fallback=fallback,
)
except TypeError:
# Ask for forgiveness if we're using mypyc.
return fallback
_all_stubs = {} # type: Dict[str, nodes.MypyFile]
def build_stubs(modules: List[str], options: Options, find_submodules: bool = False) -> List[str]:
"""Uses mypy to construct stub objects for the given modules.
This sets global state that ``get_stub`` can access.
Returns all modules we might want to check. If ``find_submodules`` is False, this is equal
to ``modules``.
:param modules: List of modules to build stubs for.
:param options: Mypy options for finding and building stubs.
:param find_submodules: Whether to attempt to find submodules of the given modules as well.
"""
data_dir = mypy.build.default_data_dir()
search_path = mypy.modulefinder.compute_search_paths([], options, data_dir)
find_module_cache = mypy.modulefinder.FindModuleCache(
search_path, fscache=None, options=options
)
all_modules = []
sources = []
for module in modules:
all_modules.append(module)
if not find_submodules:
module_path = find_module_cache.find_module(module)
if not isinstance(module_path, str):
# test_module will yield an error later when it can't find stubs
continue
sources.append(mypy.modulefinder.BuildSource(module_path, module, None))
else:
found_sources = find_module_cache.find_modules_recursive(module)
sources.extend(found_sources)
all_modules.extend(s.module for s in found_sources if s.module not in all_modules)
try:
res = mypy.build.build(sources=sources, options=options)
except mypy.errors.CompileError as e:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to failed mypy compile:\n",
str(e),
]
print("".join(output))
raise RuntimeError from e
if res.errors:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to mypy build errors:\n",
]
print("".join(output) + "\n".join(res.errors))
raise RuntimeError
global _all_stubs
_all_stubs = res.files
return all_modules
def get_stub(module: str) -> Optional[nodes.MypyFile]:
"""Returns a stub object for the given module, if we've built one."""
return _all_stubs.get(module)
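# Usage sketch (not part of the original module; the module name is only an example):
#   options = Options()
#   options.incremental = False
#   checked = build_stubs(["json"], options, find_submodules=True)
#   stub = get_stub("json")  # nodes.MypyFile for the stub, or None if it was not built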
def get_typeshed_stdlib_modules(custom_typeshed_dir: Optional[str]) -> List[str]:
"""Returns a list of stdlib modules in typeshed (for current Python version)."""
stdlib_py_versions = mypy.modulefinder.load_stdlib_py_versions(custom_typeshed_dir)
packages = set()
# Typeshed doesn't cover Python 3.5.
if sys.version_info < (3, 6):
version_info = (3, 6)
else:
version_info = sys.version_info[0:2]
for module, versions in stdlib_py_versions.items():
minver, maxver = versions
if version_info >= minver and (maxver is None or version_info <= maxver):
packages.add(module)
if custom_typeshed_dir:
typeshed_dir = Path(custom_typeshed_dir)
else:
typeshed_dir = Path(mypy.build.default_data_dir()) / "typeshed"
stdlib_dir = typeshed_dir / "stdlib"
modules = []
for path in stdlib_dir.rglob("*.pyi"):
if path.stem == "__init__":
path = path.parent
module = ".".join(path.relative_to(stdlib_dir).parts[:-1] + (path.stem,))
if module.split(".")[0] in packages:
modules.append(module)
return sorted(modules)
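# Illustrative output (depends on the typeshed version): for a typical CPython 3.9
# environment this returns names such as "abc", "argparse", "asyncio", "asyncio.tasks",
# ..., sorted alphabetically, including submodules that have their own stub files.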
def get_allowlist_entries(allowlist_file: str) -> Iterator[str]:
def strip_comments(s: str) -> str:
try:
return s[: s.index("#")].strip()
except ValueError:
return s.strip()
with open(allowlist_file) as f:
for line in f.readlines():
entry = strip_comments(line)
if entry:
yield entry
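# Example allowlist file (illustrative only); inline comments are stripped and each
# remaining entry is treated as a regex that test_stubs fullmatches against
# Error.object_desc:
#   os.path.join        # a single object
#   json\..*            # everything under the json module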
def test_stubs(args: argparse.Namespace, use_builtins_fixtures: bool = False) -> int:
"""This is stubtest! It's time to test the stubs!"""
# Load the allowlist. This is a series of strings corresponding to Error.object_desc
# Values in the dict will store whether we used the allowlist entry or not.
allowlist = {
entry: False
for allowlist_file in args.allowlist
for entry in get_allowlist_entries(allowlist_file)
}
allowlist_regexes = {entry: re.compile(entry) for entry in allowlist}
# If we need to generate an allowlist, we store Error.object_desc for each error here.
generated_allowlist = set()
modules = args.modules
if args.check_typeshed:
assert not args.modules, "Cannot pass both --check-typeshed and a list of modules"
modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir)
annoying_modules = {"antigravity", "this"}
modules = [m for m in modules if m not in annoying_modules]
assert modules, "No modules to check"
options = Options()
options.incremental = False
options.custom_typeshed_dir = args.custom_typeshed_dir
options.config_file = args.mypy_config_file
options.use_builtins_fixtures = use_builtins_fixtures
if options.config_file:
def set_strict_flags() -> None: # not needed yet
return
parse_config_file(options, set_strict_flags, options.config_file, sys.stdout, sys.stderr)
try:
modules = build_stubs(modules, options, find_submodules=not args.check_typeshed)
except RuntimeError:
return 1
exit_code = 0
for module in modules:
for error in test_module(module):
# Filter errors
if args.ignore_missing_stub and error.is_missing_stub():
continue
if args.ignore_positional_only and error.is_positional_only_related():
continue
if error.object_desc in allowlist:
allowlist[error.object_desc] = True
continue
is_allowlisted = False
for w in allowlist:
if allowlist_regexes[w].fullmatch(error.object_desc):
allowlist[w] = True
is_allowlisted = True
break
if is_allowlisted:
continue
# We have errors, so change exit code, and output whatever necessary
exit_code = 1
if args.generate_allowlist:
generated_allowlist.add(error.object_desc)
continue
print(error.get_description(concise=args.concise))
# Print unused allowlist entries
if not args.ignore_unused_allowlist:
for w in allowlist:
# Don't consider an entry unused if it regex-matches the empty string
# This lets us allowlist errors that don't manifest at all on some systems
if not allowlist[w] and not allowlist_regexes[w].fullmatch(""):
exit_code = 1
print("note: unused allowlist entry {}".format(w))
# Print the generated allowlist
if args.generate_allowlist:
for e in sorted(generated_allowlist):
print(e)
exit_code = 0
return exit_code
def parse_options(args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Compares stubs to objects introspected from the runtime."
)
parser.add_argument("modules", nargs="*", help="Modules to test")
parser.add_argument("--concise", action="store_true", help="Make output concise")
parser.add_argument(
"--ignore-missing-stub",
action="store_true",
help="Ignore errors for stub missing things that are present at runtime",
)
parser.add_argument(
"--ignore-positional-only",
action="store_true",
help="Ignore errors for whether an argument should or shouldn't be positional-only",
)
parser.add_argument(
"--custom-typeshed-dir", metavar="DIR", help="Use the custom typeshed in DIR"
)
parser.add_argument(
"--check-typeshed", action="store_true", help="Check all stdlib modules in typeshed"
)
parser.add_argument(
"--allowlist",
"--whitelist",
action="append",
metavar="FILE",
default=[],
help=(
"Use file as an allowlist. Can be passed multiple times to combine multiple "
"allowlists. Allowlists can be created with --generate-allowlist"
),
)
parser.add_argument(
"--generate-allowlist",
"--generate-whitelist",
action="store_true",
help="Print an allowlist (to stdout) to be used with --allowlist",
)
parser.add_argument(
"--ignore-unused-allowlist",
"--ignore-unused-whitelist",
action="store_true",
help="Ignore unused allowlist entries",
)
config_group = parser.add_argument_group(
title='mypy config file',
description="Use a config file instead of command line arguments. "
"Plugins and mypy path are the only supported "
"configurations.",
)
config_group.add_argument(
'--mypy-config-file',
help=(
"An existing mypy configuration file, currently used by stubtest to help "
"determine mypy path and plugins"
),
)
return parser.parse_args(args)
def main() -> int:
mypy.util.check_python_version("stubtest")
return test_stubs(parse_options(sys.argv[1:]))
if __name__ == "__main__":
sys.exit(main())
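# Invocation sketch (not part of the module): the same flow main() performs, shown with
# placeholder arguments. The module is typically run as `python -m mypy.stubtest`.
#   args = parse_options(["some_module", "--concise", "--ignore-missing-stub"])
#   sys.exit(test_stubs(args))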
|
_maybe_convert_timedelta
|
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
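Illustrative example (added; not in the original docstring), assuming an hourly
PeriodIndex and ``import pandas as pd``; the method is private, so this only sketches
the conversion it performs:
>>> idx = pd.period_range("2020-01-01", periods=3, freq="H")
>>> idx._maybe_convert_timedelta(pd.Timedelta(hours=2))
2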
|
from __future__ import annotations
from datetime import (
datetime,
timedelta,
)
from typing import Hashable
import warnings
import numpy as np
from pandas._libs import (
index as libindex,
lib,
)
from pandas._libs.tslibs import (
BaseOffset,
NaT,
Period,
Resolution,
Tick,
)
from pandas._libs.tslibs.parsing import (
DateParseError,
parse_time_string,
)
from pandas._typing import (
Dtype,
DtypeObj,
)
from pandas.errors import InvalidIndexError
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
is_datetime64_any_dtype,
is_integer,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.missing import is_valid_na_for_dtype
from pandas.core.arrays.period import (
PeriodArray,
period_array,
raise_on_incompatible,
validate_dtype_freq,
)
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.indexes.datetimes import (
DatetimeIndex,
Index,
)
from pandas.core.indexes.extension import inherit_names
from pandas.core.indexes.numeric import Int64Index
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"})
_shared_doc_kwargs = {
"klass": "PeriodArray",
}
# --- Period index sketch
def _new_PeriodIndex(cls, **d):
# GH13277 for unpickling
values = d.pop("data")
if values.dtype == "int64":
freq = d.pop("freq", None)
values = PeriodArray(values, freq=freq)
return cls._simple_new(values, **d)
else:
return cls(values, **d)
@inherit_names(
["strftime", "start_time", "end_time"] + PeriodArray._field_ops,
PeriodArray,
wrap=True,
)
@inherit_names(["is_leap_year", "_format_native_types"], PeriodArray)
class PeriodIndex(DatetimeIndexOpsMixin):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
Index keys are boxed to Period objects which carry the metadata (e.g.,
frequency information).
Parameters
----------
data : array-like (1d int np.ndarray or PeriodArray), optional
Optional period-like data to construct index with.
copy : bool
Make a copy of input ndarray.
freq : str or period object, optional
One of pandas period strings or corresponding objects.
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
day_of_week
dayofyear
day_of_year
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
See Also
--------
Index : The base pandas Index type.
Period : Represents a period of time.
DatetimeIndex : Index with datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
period_range : Create a fixed-frequency PeriodIndex.
Examples
--------
>>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])
>>> idx
PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')
"""
_typ = "periodindex"
_attributes = ["name"]
_data: PeriodArray
freq: BaseOffset
_data_cls = PeriodArray
_engine_type = libindex.PeriodEngine
_supports_partial_string_indexing = True
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in Index
# These are defined here instead of via inherit_names for mypy
@doc(
PeriodArray.asfreq,
other="pandas.arrays.PeriodArray",
other_name="PeriodArray",
**_shared_doc_kwargs,
)
def asfreq(self, freq=None, how: str = "E") -> PeriodIndex:
arr = self._data.asfreq(freq, how)
return type(self)._simple_new(arr, name=self.name)
@doc(PeriodArray.to_timestamp)
def to_timestamp(self, freq=None, how="start") -> DatetimeIndex:
arr = self._data.to_timestamp(freq, how)
return DatetimeIndex._simple_new(arr, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.hour.fget)
def hour(self) -> Int64Index:
return Int64Index(self._data.hour, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.minute.fget)
def minute(self) -> Int64Index:
return Int64Index(self._data.minute, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.second.fget)
def second(self) -> Int64Index:
return Int64Index(self._data.second, name=self.name)
# ------------------------------------------------------------------------
# Index Constructors
def __new__(
cls,
data=None,
ordinal=None,
freq=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
**fields,
) -> PeriodIndex:
valid_field_set = {
"year",
"month",
"day",
"quarter",
"hour",
"minute",
"second",
}
if not set(fields).issubset(valid_field_set):
argument = list(set(fields) - valid_field_set)[0]
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
name = maybe_extract_name(name, data, cls)
if data is None and ordinal is None:
# range-based.
data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)
# PeriodArray._generate_range does validation that fields is
# empty when really using the range-based constructor.
freq = freq2
data = PeriodArray(data, freq=freq)
else:
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
if data is None and ordinal is not None:
# we strangely ignore `ordinal` if data is passed.
ordinal = np.asarray(ordinal, dtype=np.int64)
data = PeriodArray(ordinal, freq=freq)
else:
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
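# Illustrative construction paths accepted by __new__ (comment added for clarity):
#   PeriodIndex(["2020-01", "2020-02"], freq="M")      # from period-like data
#   PeriodIndex(ordinal=[600, 601], freq="M")          # from int64 ordinals
#   PeriodIndex(year=[2000, 2002], quarter=[1, 3])     # from field arrays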
# ------------------------------------------------------------------------
# Data
@property
def values(self) -> np.ndarray:
return np.asarray(self, dtype=object)
# MASKED: _maybe_convert_timedelta function (lines 275-309)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if not isinstance(dtype, PeriodDtype):
return False
return dtype.freq == self.freq
# ------------------------------------------------------------------------
# Index Methods
def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray:
"""
where : array of timestamps
mask : np.ndarray[bool]
Array of booleans where data is not NA.
"""
if isinstance(where, DatetimeIndex):
where = PeriodIndex(where._values, freq=self.freq)
elif not isinstance(where, PeriodIndex):
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
return super().asof_locs(where, mask)
@doc(Index.astype)
def astype(self, dtype, copy: bool = True, how=lib.no_default):
dtype = pandas_dtype(dtype)
if how is not lib.no_default:
# GH#37982
warnings.warn(
"The 'how' keyword in PeriodIndex.astype is deprecated and "
"will be removed in a future version. "
"Use index.to_timestamp(how=how) instead",
FutureWarning,
stacklevel=2,
)
else:
how = "start"
if is_datetime64_any_dtype(dtype):
# 'how' is index-specific, isn't part of the EA interface.
tz = getattr(dtype, "tz", None)
return self.to_timestamp(how=how).tz_localize(tz)
return super().astype(dtype, copy=copy)
@property
def is_full(self) -> bool:
"""
Returns True if this PeriodIndex is range-like in that all Periods
between start and end are present, in order.
"""
if len(self) == 0:
return True
if not self.is_monotonic_increasing:
raise ValueError("Index is not monotonic")
values = self.asi8
return ((values[1:] - values[:-1]) < 2).all()
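# For example (illustrative): period_range("2020-01", periods=3, freq="M").is_full is
# True, while an index missing the middle month has an ordinal gap of 2 and is not full.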
@property
def inferred_type(self) -> str:
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return "period"
# ------------------------------------------------------------------------
# Indexing Methods
def _convert_tolerance(self, tolerance, target):
# Returned tolerance must be in dtype/units so that
# `|self._get_engine_target() - target._engine_target()| <= tolerance`
# is meaningful. Since PeriodIndex returns int64 for engine_target,
# we may need to convert timedelta64 tolerance to int64.
tolerance = super()._convert_tolerance(tolerance, target)
if self.dtype == target.dtype:
# convert tolerance to i8
tolerance = self._maybe_convert_timedelta(tolerance)
return tolerance
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parsable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable.
"""
orig_key = key
if not is_scalar(key):
raise InvalidIndexError(key)
if is_valid_na_for_dtype(key, self.dtype):
key = NaT
elif isinstance(key, str):
try:
loc = self._get_string_slice(key)
return loc
except (TypeError, ValueError):
pass
try:
asdt, reso_str = parse_time_string(key, self.freq)
except (ValueError, DateParseError) as err:
# A string with invalid format
raise KeyError(f"Cannot interpret '{key}' as period") from err
reso = Resolution.from_attrname(reso_str)
grp = reso.freq_group.value
freqn = self.dtype.freq_group_code
# _get_string_slice will handle cases where grp < freqn
assert grp >= freqn
# BusinessDay is a bit strange. It has a *lower* code, but we never parse
# a string as "BusinessDay" resolution, just Day.
if grp == freqn or (
reso == Resolution.RESO_DAY and self.dtype.freq.name == "B"
):
key = Period(asdt, freq=self.freq)
loc = self.get_loc(key, method=method, tolerance=tolerance)
return loc
elif method is None:
raise KeyError(key)
else:
key = asdt
elif isinstance(key, Period):
sfreq = self.freq
kfreq = key.freq
if not (
sfreq.n == kfreq.n
and sfreq._period_dtype_code == kfreq._period_dtype_code
):
# GH#42247 For the subset of DateOffsets that can be Period freqs,
# checking these two attributes is sufficient to check equality,
# and much more performant than `self.freq == key.freq`
raise KeyError(key)
elif isinstance(key, datetime):
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
else:
# in particular integer, which Period constructor would cast to string
raise KeyError(key)
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
try:
return Index.get_loc(self, key, method, tolerance)
except KeyError as err:
raise KeyError(orig_key) from err
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'}, or None
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ["loc", "getitem", None, lib.no_default]
self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound")
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, str):
try:
parsed, reso_str = parse_time_string(label, self.freq)
except ValueError as err:
# string cannot be parsed as datetime-like
raise self._invalid_indexer("slice", label) from err
reso = Resolution.from_attrname(reso_str)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
return lower if side == "left" else upper
elif not isinstance(label, self._data._recognized_scalars):
raise self._invalid_indexer("slice", label)
return label
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
grp = reso.freq_group
iv = Period(parsed, freq=grp.value)
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
def _validate_partial_date_slice(self, reso: Resolution):
assert isinstance(reso, Resolution), (type(reso), reso)
grp = reso.freq_group
freqn = self.dtype.freq_group_code
if not grp.value < freqn:
# TODO: we used to also check for
# reso in ["day", "hour", "minute", "second"]
# why is that check not needed?
raise ValueError
def _get_string_slice(self, key: str):
parsed, reso_str = parse_time_string(key, self.freq)
reso = Resolution.from_attrname(reso_str)
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
def period_range(
start=None, end=None, periods: int | None = None, freq=None, name=None
) -> PeriodIndex:
"""
Return a fixed frequency PeriodIndex.
The day (calendar) is the default frequency.
Parameters
----------
start : str or period-like, default None
Left bound for generating periods.
end : str or period-like, default None
Right bound for generating periods.
periods : int, default None
Number of periods to generate.
freq : str or DateOffset, optional
Frequency alias. By default the freq is taken from `start` or `end`
if those are Period objects. Otherwise, the default is ``"D"`` for
daily frequency.
name : str, default None
Name of the resulting PeriodIndex.
Returns
-------
PeriodIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',
'2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',
'2018-01'],
dtype='period[M]')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
... end=pd.Period('2017Q2', freq='Q'), freq='M')
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]')
"""
if com.count_not_none(start, end, periods) != 2:
raise ValueError(
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):
freq = "D"
data, freq = PeriodArray._generate_range(start, end, periods, freq, fields={})
data = PeriodArray(data, freq=freq)
return PeriodIndex(data, name=name)
|
def _maybe_convert_timedelta(self, other):
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
if isinstance(self.freq, Tick):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, BaseOffset):
if other.base == self.freq.base:
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
raise raise_on_incompatible(self, None)
| 275 | 309 |
from __future__ import annotations
from datetime import (
datetime,
timedelta,
)
from typing import Hashable
import warnings
import numpy as np
from pandas._libs import (
index as libindex,
lib,
)
from pandas._libs.tslibs import (
BaseOffset,
NaT,
Period,
Resolution,
Tick,
)
from pandas._libs.tslibs.parsing import (
DateParseError,
parse_time_string,
)
from pandas._typing import (
Dtype,
DtypeObj,
)
from pandas.errors import InvalidIndexError
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
is_datetime64_any_dtype,
is_integer,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.missing import is_valid_na_for_dtype
from pandas.core.arrays.period import (
PeriodArray,
period_array,
raise_on_incompatible,
validate_dtype_freq,
)
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.indexes.datetimes import (
DatetimeIndex,
Index,
)
from pandas.core.indexes.extension import inherit_names
from pandas.core.indexes.numeric import Int64Index
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"})
_shared_doc_kwargs = {
"klass": "PeriodArray",
}
# --- Period index sketch
def _new_PeriodIndex(cls, **d):
# GH13277 for unpickling
values = d.pop("data")
if values.dtype == "int64":
freq = d.pop("freq", None)
values = PeriodArray(values, freq=freq)
return cls._simple_new(values, **d)
else:
return cls(values, **d)
@inherit_names(
["strftime", "start_time", "end_time"] + PeriodArray._field_ops,
PeriodArray,
wrap=True,
)
@inherit_names(["is_leap_year", "_format_native_types"], PeriodArray)
class PeriodIndex(DatetimeIndexOpsMixin):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
Index keys are boxed to Period objects which carry the metadata (e.g.,
frequency information).
Parameters
----------
data : array-like (1d int np.ndarray or PeriodArray), optional
Optional period-like data to construct index with.
copy : bool
Make a copy of input ndarray.
freq : str or period object, optional
One of pandas period strings or corresponding objects.
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
day_of_week
dayofyear
day_of_year
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
See Also
--------
Index : The base pandas Index type.
Period : Represents a period of time.
DatetimeIndex : Index with datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
period_range : Create a fixed-frequency PeriodIndex.
Examples
--------
>>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])
>>> idx
PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')
"""
_typ = "periodindex"
_attributes = ["name"]
_data: PeriodArray
freq: BaseOffset
_data_cls = PeriodArray
_engine_type = libindex.PeriodEngine
_supports_partial_string_indexing = True
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in Index
# These are defined here instead of via inherit_names for mypy
@doc(
PeriodArray.asfreq,
other="pandas.arrays.PeriodArray",
other_name="PeriodArray",
**_shared_doc_kwargs,
)
def asfreq(self, freq=None, how: str = "E") -> PeriodIndex:
arr = self._data.asfreq(freq, how)
return type(self)._simple_new(arr, name=self.name)
@doc(PeriodArray.to_timestamp)
def to_timestamp(self, freq=None, how="start") -> DatetimeIndex:
arr = self._data.to_timestamp(freq, how)
return DatetimeIndex._simple_new(arr, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.hour.fget)
def hour(self) -> Int64Index:
return Int64Index(self._data.hour, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.minute.fget)
def minute(self) -> Int64Index:
return Int64Index(self._data.minute, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.second.fget)
def second(self) -> Int64Index:
return Int64Index(self._data.second, name=self.name)
# ------------------------------------------------------------------------
# Index Constructors
def __new__(
cls,
data=None,
ordinal=None,
freq=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
**fields,
) -> PeriodIndex:
valid_field_set = {
"year",
"month",
"day",
"quarter",
"hour",
"minute",
"second",
}
if not set(fields).issubset(valid_field_set):
argument = list(set(fields) - valid_field_set)[0]
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
name = maybe_extract_name(name, data, cls)
if data is None and ordinal is None:
# range-based.
data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)
# PeriodArray._generate_range does validation that fields is
# empty when really using the range-based constructor.
freq = freq2
data = PeriodArray(data, freq=freq)
else:
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
if data is None and ordinal is not None:
# we strangely ignore `ordinal` if data is passed.
ordinal = np.asarray(ordinal, dtype=np.int64)
data = PeriodArray(ordinal, freq=freq)
else:
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
# ------------------------------------------------------------------------
# Data
@property
def values(self) -> np.ndarray:
return np.asarray(self, dtype=object)
def _maybe_convert_timedelta(self, other):
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
if isinstance(self.freq, Tick):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, BaseOffset):
if other.base == self.freq.base:
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
raise raise_on_incompatible(self, None)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if not isinstance(dtype, PeriodDtype):
return False
return dtype.freq == self.freq
# ------------------------------------------------------------------------
# Index Methods
def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray:
"""
where : array of timestamps
mask : np.ndarray[bool]
Array of booleans where data is not NA.
"""
if isinstance(where, DatetimeIndex):
where = PeriodIndex(where._values, freq=self.freq)
elif not isinstance(where, PeriodIndex):
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
return super().asof_locs(where, mask)
@doc(Index.astype)
def astype(self, dtype, copy: bool = True, how=lib.no_default):
dtype = pandas_dtype(dtype)
if how is not lib.no_default:
# GH#37982
warnings.warn(
"The 'how' keyword in PeriodIndex.astype is deprecated and "
"will be removed in a future version. "
"Use index.to_timestamp(how=how) instead",
FutureWarning,
stacklevel=2,
)
else:
how = "start"
if is_datetime64_any_dtype(dtype):
# 'how' is index-specific, isn't part of the EA interface.
tz = getattr(dtype, "tz", None)
return self.to_timestamp(how=how).tz_localize(tz)
return super().astype(dtype, copy=copy)
@property
def is_full(self) -> bool:
"""
Returns True if this PeriodIndex is range-like in that all Periods
between start and end are present, in order.
"""
if len(self) == 0:
return True
if not self.is_monotonic_increasing:
raise ValueError("Index is not monotonic")
values = self.asi8
return ((values[1:] - values[:-1]) < 2).all()
@property
def inferred_type(self) -> str:
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return "period"
# ------------------------------------------------------------------------
# Indexing Methods
def _convert_tolerance(self, tolerance, target):
# Returned tolerance must be in dtype/units so that
# `|self._get_engine_target() - target._engine_target()| <= tolerance`
# is meaningful. Since PeriodIndex returns int64 for engine_target,
# we may need to convert timedelta64 tolerance to int64.
tolerance = super()._convert_tolerance(tolerance, target)
if self.dtype == target.dtype:
# convert tolerance to i8
tolerance = self._maybe_convert_timedelta(tolerance)
return tolerance
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parsable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable.
"""
orig_key = key
if not is_scalar(key):
raise InvalidIndexError(key)
if is_valid_na_for_dtype(key, self.dtype):
key = NaT
elif isinstance(key, str):
try:
loc = self._get_string_slice(key)
return loc
except (TypeError, ValueError):
pass
try:
asdt, reso_str = parse_time_string(key, self.freq)
except (ValueError, DateParseError) as err:
# A string with invalid format
raise KeyError(f"Cannot interpret '{key}' as period") from err
reso = Resolution.from_attrname(reso_str)
grp = reso.freq_group.value
freqn = self.dtype.freq_group_code
# _get_string_slice will handle cases where grp < freqn
assert grp >= freqn
# BusinessDay is a bit strange. It has a *lower* code, but we never parse
# a string as "BusinessDay" resolution, just Day.
if grp == freqn or (
reso == Resolution.RESO_DAY and self.dtype.freq.name == "B"
):
key = Period(asdt, freq=self.freq)
loc = self.get_loc(key, method=method, tolerance=tolerance)
return loc
elif method is None:
raise KeyError(key)
else:
key = asdt
elif isinstance(key, Period):
sfreq = self.freq
kfreq = key.freq
if not (
sfreq.n == kfreq.n
and sfreq._period_dtype_code == kfreq._period_dtype_code
):
# GH#42247 For the subset of DateOffsets that can be Period freqs,
# checking these two attributes is sufficient to check equality,
# and much more performant than `self.freq == key.freq`
raise KeyError(key)
elif isinstance(key, datetime):
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
else:
# in particular integer, which Period constructor would cast to string
raise KeyError(key)
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
try:
return Index.get_loc(self, key, method, tolerance)
except KeyError as err:
raise KeyError(orig_key) from err
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'}, or None
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ["loc", "getitem", None, lib.no_default]
self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound")
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, str):
try:
parsed, reso_str = parse_time_string(label, self.freq)
except ValueError as err:
# string cannot be parsed as datetime-like
raise self._invalid_indexer("slice", label) from err
reso = Resolution.from_attrname(reso_str)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
return lower if side == "left" else upper
elif not isinstance(label, self._data._recognized_scalars):
raise self._invalid_indexer("slice", label)
return label
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
grp = reso.freq_group
iv = Period(parsed, freq=grp.value)
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
def _validate_partial_date_slice(self, reso: Resolution):
assert isinstance(reso, Resolution), (type(reso), reso)
grp = reso.freq_group
freqn = self.dtype.freq_group_code
if not grp.value < freqn:
# TODO: we used to also check for
# reso in ["day", "hour", "minute", "second"]
# why is that check not needed?
raise ValueError
def _get_string_slice(self, key: str):
parsed, reso_str = parse_time_string(key, self.freq)
reso = Resolution.from_attrname(reso_str)
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
def period_range(
start=None, end=None, periods: int | None = None, freq=None, name=None
) -> PeriodIndex:
"""
Return a fixed frequency PeriodIndex.
The day (calendar) is the default frequency.
Parameters
----------
start : str or period-like, default None
Left bound for generating periods.
end : str or period-like, default None
Right bound for generating periods.
periods : int, default None
Number of periods to generate.
freq : str or DateOffset, optional
Frequency alias. By default the freq is taken from `start` or `end`
if those are Period objects. Otherwise, the default is ``"D"`` for
daily frequency.
name : str, default None
Name of the resulting PeriodIndex.
Returns
-------
PeriodIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',
'2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',
'2018-01'],
dtype='period[M]')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
... end=pd.Period('2017Q2', freq='Q'), freq='M')
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]')
"""
if com.count_not_none(start, end, periods) != 2:
raise ValueError(
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):
freq = "D"
data, freq = PeriodArray._generate_range(start, end, periods, freq, fields={})
data = PeriodArray(data, freq=freq)
return PeriodIndex(data, name=name)
|
get_loc
|
Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parsable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable.
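Illustrative example (added; not in the original docstring), assuming a monthly
index and ``import pandas as pd``:
>>> idx = pd.period_range("2020-01", periods=3, freq="M")
>>> idx.get_loc("2020-02")
1
>>> idx.get_loc(pd.Period("2020-02", freq="M"))
1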
|
from __future__ import annotations
from datetime import (
datetime,
timedelta,
)
from typing import Hashable
import warnings
import numpy as np
from pandas._libs import (
index as libindex,
lib,
)
from pandas._libs.tslibs import (
BaseOffset,
NaT,
Period,
Resolution,
Tick,
)
from pandas._libs.tslibs.parsing import (
DateParseError,
parse_time_string,
)
from pandas._typing import (
Dtype,
DtypeObj,
)
from pandas.errors import InvalidIndexError
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
is_datetime64_any_dtype,
is_integer,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.missing import is_valid_na_for_dtype
from pandas.core.arrays.period import (
PeriodArray,
period_array,
raise_on_incompatible,
validate_dtype_freq,
)
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.indexes.datetimes import (
DatetimeIndex,
Index,
)
from pandas.core.indexes.extension import inherit_names
from pandas.core.indexes.numeric import Int64Index
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"})
_shared_doc_kwargs = {
"klass": "PeriodArray",
}
# --- Period index sketch
def _new_PeriodIndex(cls, **d):
# GH13277 for unpickling
values = d.pop("data")
if values.dtype == "int64":
freq = d.pop("freq", None)
values = PeriodArray(values, freq=freq)
return cls._simple_new(values, **d)
else:
return cls(values, **d)
@inherit_names(
["strftime", "start_time", "end_time"] + PeriodArray._field_ops,
PeriodArray,
wrap=True,
)
@inherit_names(["is_leap_year", "_format_native_types"], PeriodArray)
class PeriodIndex(DatetimeIndexOpsMixin):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
Index keys are boxed to Period objects which carry the metadata (e.g.,
frequency information).
Parameters
----------
data : array-like (1d int np.ndarray or PeriodArray), optional
Optional period-like data to construct index with.
copy : bool
Make a copy of input ndarray.
freq : str or period object, optional
One of pandas period strings or corresponding objects.
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
day_of_week
dayofyear
day_of_year
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
See Also
--------
Index : The base pandas Index type.
Period : Represents a period of time.
DatetimeIndex : Index with datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
period_range : Create a fixed-frequency PeriodIndex.
Examples
--------
>>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])
>>> idx
PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')
"""
_typ = "periodindex"
_attributes = ["name"]
_data: PeriodArray
freq: BaseOffset
_data_cls = PeriodArray
_engine_type = libindex.PeriodEngine
_supports_partial_string_indexing = True
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in Index
# These are defined here instead of via inherit_names for mypy
@doc(
PeriodArray.asfreq,
other="pandas.arrays.PeriodArray",
other_name="PeriodArray",
**_shared_doc_kwargs,
)
def asfreq(self, freq=None, how: str = "E") -> PeriodIndex:
arr = self._data.asfreq(freq, how)
return type(self)._simple_new(arr, name=self.name)
@doc(PeriodArray.to_timestamp)
def to_timestamp(self, freq=None, how="start") -> DatetimeIndex:
arr = self._data.to_timestamp(freq, how)
return DatetimeIndex._simple_new(arr, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.hour.fget)
def hour(self) -> Int64Index:
return Int64Index(self._data.hour, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.minute.fget)
def minute(self) -> Int64Index:
return Int64Index(self._data.minute, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.second.fget)
def second(self) -> Int64Index:
return Int64Index(self._data.second, name=self.name)
# ------------------------------------------------------------------------
# Index Constructors
def __new__(
cls,
data=None,
ordinal=None,
freq=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
**fields,
) -> PeriodIndex:
valid_field_set = {
"year",
"month",
"day",
"quarter",
"hour",
"minute",
"second",
}
if not set(fields).issubset(valid_field_set):
argument = list(set(fields) - valid_field_set)[0]
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
name = maybe_extract_name(name, data, cls)
if data is None and ordinal is None:
# range-based.
data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)
# PeriodArray._generate_range does validation that fields is
# empty when really using the range-based constructor.
freq = freq2
data = PeriodArray(data, freq=freq)
else:
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
if data is None and ordinal is not None:
# we strangely ignore `ordinal` if data is passed.
ordinal = np.asarray(ordinal, dtype=np.int64)
data = PeriodArray(ordinal, freq=freq)
else:
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
# ------------------------------------------------------------------------
# Data
@property
def values(self) -> np.ndarray:
return np.asarray(self, dtype=object)
def _maybe_convert_timedelta(self, other):
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
if isinstance(self.freq, Tick):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, BaseOffset):
if other.base == self.freq.base:
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
raise raise_on_incompatible(self, None)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if not isinstance(dtype, PeriodDtype):
return False
return dtype.freq == self.freq
# ------------------------------------------------------------------------
# Index Methods
def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray:
"""
where : array of timestamps
mask : np.ndarray[bool]
Array of booleans where data is not NA.
"""
if isinstance(where, DatetimeIndex):
where = PeriodIndex(where._values, freq=self.freq)
elif not isinstance(where, PeriodIndex):
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
return super().asof_locs(where, mask)
@doc(Index.astype)
def astype(self, dtype, copy: bool = True, how=lib.no_default):
dtype = pandas_dtype(dtype)
if how is not lib.no_default:
# GH#37982
warnings.warn(
"The 'how' keyword in PeriodIndex.astype is deprecated and "
"will be removed in a future version. "
"Use index.to_timestamp(how=how) instead",
FutureWarning,
stacklevel=2,
)
else:
how = "start"
if is_datetime64_any_dtype(dtype):
# 'how' is index-specific, isn't part of the EA interface.
tz = getattr(dtype, "tz", None)
return self.to_timestamp(how=how).tz_localize(tz)
return super().astype(dtype, copy=copy)
@property
def is_full(self) -> bool:
"""
Returns True if this PeriodIndex is range-like in that all Periods
between start and end are present, in order.
"""
if len(self) == 0:
return True
if not self.is_monotonic_increasing:
raise ValueError("Index is not monotonic")
values = self.asi8
return ((values[1:] - values[:-1]) < 2).all()
@property
def inferred_type(self) -> str:
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return "period"
# ------------------------------------------------------------------------
# Indexing Methods
def _convert_tolerance(self, tolerance, target):
# Returned tolerance must be in dtype/units so that
# `|self._get_engine_target() - target._engine_target()| <= tolerance`
# is meaningful. Since PeriodIndex returns int64 for engine_target,
# we may need to convert timedelta64 tolerance to int64.
tolerance = super()._convert_tolerance(tolerance, target)
if self.dtype == target.dtype:
# convert tolerance to i8
tolerance = self._maybe_convert_timedelta(tolerance)
return tolerance
# MASKED: get_loc function (lines 393-485)
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'}, or None
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ["loc", "getitem", None, lib.no_default]
self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound")
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, str):
try:
parsed, reso_str = parse_time_string(label, self.freq)
except ValueError as err:
# string cannot be parsed as datetime-like
raise self._invalid_indexer("slice", label) from err
reso = Resolution.from_attrname(reso_str)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
return lower if side == "left" else upper
elif not isinstance(label, self._data._recognized_scalars):
raise self._invalid_indexer("slice", label)
return label
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
grp = reso.freq_group
iv = Period(parsed, freq=grp.value)
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
def _validate_partial_date_slice(self, reso: Resolution):
assert isinstance(reso, Resolution), (type(reso), reso)
grp = reso.freq_group
freqn = self.dtype.freq_group_code
if not grp.value < freqn:
# TODO: we used to also check for
# reso in ["day", "hour", "minute", "second"]
# why is that check not needed?
raise ValueError
def _get_string_slice(self, key: str):
parsed, reso_str = parse_time_string(key, self.freq)
reso = Resolution.from_attrname(reso_str)
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
def period_range(
start=None, end=None, periods: int | None = None, freq=None, name=None
) -> PeriodIndex:
"""
Return a fixed frequency PeriodIndex.
The day (calendar) is the default frequency.
Parameters
----------
start : str or period-like, default None
Left bound for generating periods.
end : str or period-like, default None
Right bound for generating periods.
periods : int, default None
Number of periods to generate.
freq : str or DateOffset, optional
Frequency alias. By default the freq is taken from `start` or `end`
if those are Period objects. Otherwise, the default is ``"D"`` for
daily frequency.
name : str, default None
Name of the resulting PeriodIndex.
Returns
-------
PeriodIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',
'2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',
'2018-01'],
dtype='period[M]')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
... end=pd.Period('2017Q2', freq='Q'), freq='M')
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]')
"""
if com.count_not_none(start, end, periods) != 2:
raise ValueError(
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):
freq = "D"
data, freq = PeriodArray._generate_range(start, end, periods, freq, fields={})
data = PeriodArray(data, freq=freq)
return PeriodIndex(data, name=name)
|
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parsable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable.
"""
orig_key = key
if not is_scalar(key):
raise InvalidIndexError(key)
if is_valid_na_for_dtype(key, self.dtype):
key = NaT
elif isinstance(key, str):
try:
loc = self._get_string_slice(key)
return loc
except (TypeError, ValueError):
pass
try:
asdt, reso_str = parse_time_string(key, self.freq)
except (ValueError, DateParseError) as err:
# A string with invalid format
raise KeyError(f"Cannot interpret '{key}' as period") from err
reso = Resolution.from_attrname(reso_str)
grp = reso.freq_group.value
freqn = self.dtype.freq_group_code
# _get_string_slice will handle cases where grp < freqn
assert grp >= freqn
# BusinessDay is a bit strange. It has a *lower* code, but we never parse
# a string as "BusinessDay" resolution, just Day.
if grp == freqn or (
reso == Resolution.RESO_DAY and self.dtype.freq.name == "B"
):
key = Period(asdt, freq=self.freq)
loc = self.get_loc(key, method=method, tolerance=tolerance)
return loc
elif method is None:
raise KeyError(key)
else:
key = asdt
elif isinstance(key, Period):
sfreq = self.freq
kfreq = key.freq
if not (
sfreq.n == kfreq.n
and sfreq._period_dtype_code == kfreq._period_dtype_code
):
# GH#42247 For the subset of DateOffsets that can be Period freqs,
# checking these two attributes is sufficient to check equality,
# and much more performant than `self.freq == key.freq`
raise KeyError(key)
elif isinstance(key, datetime):
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
else:
# in particular integer, which Period constructor would cast to string
raise KeyError(key)
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
try:
return Index.get_loc(self, key, method, tolerance)
except KeyError as err:
raise KeyError(orig_key) from err
| 393 | 485 |
from __future__ import annotations
from datetime import (
datetime,
timedelta,
)
from typing import Hashable
import warnings
import numpy as np
from pandas._libs import (
index as libindex,
lib,
)
from pandas._libs.tslibs import (
BaseOffset,
NaT,
Period,
Resolution,
Tick,
)
from pandas._libs.tslibs.parsing import (
DateParseError,
parse_time_string,
)
from pandas._typing import (
Dtype,
DtypeObj,
)
from pandas.errors import InvalidIndexError
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
is_datetime64_any_dtype,
is_integer,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.missing import is_valid_na_for_dtype
from pandas.core.arrays.period import (
PeriodArray,
period_array,
raise_on_incompatible,
validate_dtype_freq,
)
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.indexes.datetimes import (
DatetimeIndex,
Index,
)
from pandas.core.indexes.extension import inherit_names
from pandas.core.indexes.numeric import Int64Index
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"})
_shared_doc_kwargs = {
"klass": "PeriodArray",
}
# --- Period index sketch
def _new_PeriodIndex(cls, **d):
# GH13277 for unpickling
values = d.pop("data")
if values.dtype == "int64":
freq = d.pop("freq", None)
values = PeriodArray(values, freq=freq)
return cls._simple_new(values, **d)
else:
return cls(values, **d)
@inherit_names(
["strftime", "start_time", "end_time"] + PeriodArray._field_ops,
PeriodArray,
wrap=True,
)
@inherit_names(["is_leap_year", "_format_native_types"], PeriodArray)
class PeriodIndex(DatetimeIndexOpsMixin):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
    Index keys are boxed to Period objects which carry the metadata (e.g.,
frequency information).
Parameters
----------
data : array-like (1d int np.ndarray or PeriodArray), optional
Optional period-like data to construct index with.
copy : bool
Make a copy of input ndarray.
freq : str or period object, optional
One of pandas period strings or corresponding objects.
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
day_of_week
dayofyear
day_of_year
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
See Also
--------
Index : The base pandas Index type.
Period : Represents a period of time.
DatetimeIndex : Index with datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
period_range : Create a fixed-frequency PeriodIndex.
Examples
--------
>>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])
>>> idx
PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')
"""
_typ = "periodindex"
_attributes = ["name"]
_data: PeriodArray
freq: BaseOffset
_data_cls = PeriodArray
_engine_type = libindex.PeriodEngine
_supports_partial_string_indexing = True
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in Index
# These are defined here instead of via inherit_names for mypy
@doc(
PeriodArray.asfreq,
other="pandas.arrays.PeriodArray",
other_name="PeriodArray",
**_shared_doc_kwargs,
)
def asfreq(self, freq=None, how: str = "E") -> PeriodIndex:
arr = self._data.asfreq(freq, how)
return type(self)._simple_new(arr, name=self.name)
@doc(PeriodArray.to_timestamp)
def to_timestamp(self, freq=None, how="start") -> DatetimeIndex:
arr = self._data.to_timestamp(freq, how)
return DatetimeIndex._simple_new(arr, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.hour.fget)
def hour(self) -> Int64Index:
return Int64Index(self._data.hour, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.minute.fget)
def minute(self) -> Int64Index:
return Int64Index(self._data.minute, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.second.fget)
def second(self) -> Int64Index:
return Int64Index(self._data.second, name=self.name)
# ------------------------------------------------------------------------
# Index Constructors
def __new__(
cls,
data=None,
ordinal=None,
freq=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
**fields,
) -> PeriodIndex:
valid_field_set = {
"year",
"month",
"day",
"quarter",
"hour",
"minute",
"second",
}
if not set(fields).issubset(valid_field_set):
argument = list(set(fields) - valid_field_set)[0]
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
name = maybe_extract_name(name, data, cls)
if data is None and ordinal is None:
# range-based.
data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)
            # PeriodArray._generate_range does validation that fields is
# empty when really using the range-based constructor.
freq = freq2
data = PeriodArray(data, freq=freq)
else:
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
if data is None and ordinal is not None:
# we strangely ignore `ordinal` if data is passed.
ordinal = np.asarray(ordinal, dtype=np.int64)
data = PeriodArray(ordinal, freq=freq)
else:
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
# ------------------------------------------------------------------------
# Data
@property
def values(self) -> np.ndarray:
return np.asarray(self, dtype=object)
def _maybe_convert_timedelta(self, other):
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
if isinstance(self.freq, Tick):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, BaseOffset):
if other.base == self.freq.base:
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
raise raise_on_incompatible(self, None)
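    # --- Illustrative note (added; not part of the original pandas source) ----
    # _maybe_convert_timedelta is what lets timedelta-like offsets shift a
    # PeriodIndex by whole multiples of its freq. Assuming pandas 1.x behaviour:
    #
    #   daily = pd.period_range("2021-01-01", periods=3, freq="D")
    #   daily + pd.Timedelta(days=2)    # -> 2021-01-03 .. 2021-01-05
    #   daily + pd.Timedelta(hours=12)  # -> raises IncompatibleFrequency (a ValueError)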
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if not isinstance(dtype, PeriodDtype):
return False
return dtype.freq == self.freq
# ------------------------------------------------------------------------
# Index Methods
def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray:
"""
where : array of timestamps
mask : np.ndarray[bool]
Array of booleans where data is not NA.
"""
if isinstance(where, DatetimeIndex):
where = PeriodIndex(where._values, freq=self.freq)
elif not isinstance(where, PeriodIndex):
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
return super().asof_locs(where, mask)
@doc(Index.astype)
def astype(self, dtype, copy: bool = True, how=lib.no_default):
dtype = pandas_dtype(dtype)
if how is not lib.no_default:
# GH#37982
warnings.warn(
"The 'how' keyword in PeriodIndex.astype is deprecated and "
"will be removed in a future version. "
"Use index.to_timestamp(how=how) instead",
FutureWarning,
stacklevel=2,
)
else:
how = "start"
if is_datetime64_any_dtype(dtype):
# 'how' is index-specific, isn't part of the EA interface.
tz = getattr(dtype, "tz", None)
return self.to_timestamp(how=how).tz_localize(tz)
return super().astype(dtype, copy=copy)
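    # --- Illustrative note (added; not part of the original pandas source) ----
    # astype to a datetime64 dtype routes through to_timestamp(); assuming
    # pandas 1.x behaviour:
    #
    #   pi = pd.period_range("2020-01", periods=3, freq="M")
    #   pi.astype("datetime64[ns]")  # DatetimeIndex of the period start timestamps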
@property
def is_full(self) -> bool:
"""
Returns True if this PeriodIndex is range-like in that all Periods
between start and end are present, in order.
"""
if len(self) == 0:
return True
if not self.is_monotonic_increasing:
raise ValueError("Index is not monotonic")
values = self.asi8
return ((values[1:] - values[:-1]) < 2).all()
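    # --- Illustrative note (added; not part of the original pandas source) ----
    # A hedged sketch of is_full, assuming pandas 1.x behaviour:
    #
    #   pd.PeriodIndex(["2020-01", "2020-02", "2020-03"], freq="M").is_full  # True
    #   pd.PeriodIndex(["2020-01", "2020-03"], freq="M").is_full             # False (gap)
    #   # a non-monotonic PeriodIndex raises ValueError("Index is not monotonic")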
@property
def inferred_type(self) -> str:
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return "period"
# ------------------------------------------------------------------------
# Indexing Methods
def _convert_tolerance(self, tolerance, target):
# Returned tolerance must be in dtype/units so that
# `|self._get_engine_target() - target._engine_target()| <= tolerance`
# is meaningful. Since PeriodIndex returns int64 for engine_target,
# we may need to convert timedelta64 tolerance to int64.
tolerance = super()._convert_tolerance(tolerance, target)
if self.dtype == target.dtype:
# convert tolerance to i8
tolerance = self._maybe_convert_timedelta(tolerance)
return tolerance
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parsable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable.
"""
orig_key = key
if not is_scalar(key):
raise InvalidIndexError(key)
if is_valid_na_for_dtype(key, self.dtype):
key = NaT
elif isinstance(key, str):
try:
loc = self._get_string_slice(key)
return loc
except (TypeError, ValueError):
pass
try:
asdt, reso_str = parse_time_string(key, self.freq)
except (ValueError, DateParseError) as err:
# A string with invalid format
raise KeyError(f"Cannot interpret '{key}' as period") from err
reso = Resolution.from_attrname(reso_str)
grp = reso.freq_group.value
freqn = self.dtype.freq_group_code
# _get_string_slice will handle cases where grp < freqn
assert grp >= freqn
# BusinessDay is a bit strange. It has a *lower* code, but we never parse
# a string as "BusinessDay" resolution, just Day.
if grp == freqn or (
reso == Resolution.RESO_DAY and self.dtype.freq.name == "B"
):
key = Period(asdt, freq=self.freq)
loc = self.get_loc(key, method=method, tolerance=tolerance)
return loc
elif method is None:
raise KeyError(key)
else:
key = asdt
elif isinstance(key, Period):
sfreq = self.freq
kfreq = key.freq
if not (
sfreq.n == kfreq.n
and sfreq._period_dtype_code == kfreq._period_dtype_code
):
# GH#42247 For the subset of DateOffsets that can be Period freqs,
# checking these two attributes is sufficient to check equality,
# and much more performant than `self.freq == key.freq`
raise KeyError(key)
elif isinstance(key, datetime):
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
else:
# in particular integer, which Period constructor would cast to string
raise KeyError(key)
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
try:
return Index.get_loc(self, key, method, tolerance)
except KeyError as err:
raise KeyError(orig_key) from err
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'}, or None
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ["loc", "getitem", None, lib.no_default]
self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound")
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, str):
try:
parsed, reso_str = parse_time_string(label, self.freq)
except ValueError as err:
# string cannot be parsed as datetime-like
raise self._invalid_indexer("slice", label) from err
reso = Resolution.from_attrname(reso_str)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
return lower if side == "left" else upper
elif not isinstance(label, self._data._recognized_scalars):
raise self._invalid_indexer("slice", label)
return label
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
grp = reso.freq_group
iv = Period(parsed, freq=grp.value)
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
def _validate_partial_date_slice(self, reso: Resolution):
assert isinstance(reso, Resolution), (type(reso), reso)
grp = reso.freq_group
freqn = self.dtype.freq_group_code
if not grp.value < freqn:
# TODO: we used to also check for
# reso in ["day", "hour", "minute", "second"]
# why is that check not needed?
raise ValueError
def _get_string_slice(self, key: str):
parsed, reso_str = parse_time_string(key, self.freq)
reso = Resolution.from_attrname(reso_str)
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
def period_range(
start=None, end=None, periods: int | None = None, freq=None, name=None
) -> PeriodIndex:
"""
Return a fixed frequency PeriodIndex.
The day (calendar) is the default frequency.
Parameters
----------
start : str or period-like, default None
Left bound for generating periods.
end : str or period-like, default None
Right bound for generating periods.
periods : int, default None
Number of periods to generate.
freq : str or DateOffset, optional
Frequency alias. By default the freq is taken from `start` or `end`
if those are Period objects. Otherwise, the default is ``"D"`` for
daily frequency.
name : str, default None
Name of the resulting PeriodIndex.
Returns
-------
PeriodIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',
'2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',
'2018-01'],
dtype='period[M]')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
... end=pd.Period('2017Q2', freq='Q'), freq='M')
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]')
"""
if com.count_not_none(start, end, periods) != 2:
raise ValueError(
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):
freq = "D"
data, freq = PeriodArray._generate_range(start, end, periods, freq, fields={})
data = PeriodArray(data, freq=freq)
return PeriodIndex(data, name=name)
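# --- Illustrative usage (added; not part of the dataset row above) ------------
# period_range requires exactly two of start/end/periods; when neither bound is
# a Period, freq falls back to "D". A hedged sketch assuming pandas 1.x:
import pandas as pd

print(pd.period_range(start="2021-01-01", periods=3))
# PeriodIndex(['2021-01-01', '2021-01-02', '2021-01-03'], dtype='period[D]')
print(pd.period_range(start=pd.Period("2021Q1"), periods=2))
# PeriodIndex(['2021Q1', '2021Q2'], dtype='period[Q-DEC]'), freq taken from start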
|
_maybe_cast_slice_bound
|
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'}, or None
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
|
from __future__ import annotations
from datetime import (
datetime,
timedelta,
)
from typing import Hashable
import warnings
import numpy as np
from pandas._libs import (
index as libindex,
lib,
)
from pandas._libs.tslibs import (
BaseOffset,
NaT,
Period,
Resolution,
Tick,
)
from pandas._libs.tslibs.parsing import (
DateParseError,
parse_time_string,
)
from pandas._typing import (
Dtype,
DtypeObj,
)
from pandas.errors import InvalidIndexError
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
is_datetime64_any_dtype,
is_integer,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.missing import is_valid_na_for_dtype
from pandas.core.arrays.period import (
PeriodArray,
period_array,
raise_on_incompatible,
validate_dtype_freq,
)
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.indexes.datetimes import (
DatetimeIndex,
Index,
)
from pandas.core.indexes.extension import inherit_names
from pandas.core.indexes.numeric import Int64Index
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"})
_shared_doc_kwargs = {
"klass": "PeriodArray",
}
# --- Period index sketch
def _new_PeriodIndex(cls, **d):
# GH13277 for unpickling
values = d.pop("data")
if values.dtype == "int64":
freq = d.pop("freq", None)
values = PeriodArray(values, freq=freq)
return cls._simple_new(values, **d)
else:
return cls(values, **d)
@inherit_names(
["strftime", "start_time", "end_time"] + PeriodArray._field_ops,
PeriodArray,
wrap=True,
)
@inherit_names(["is_leap_year", "_format_native_types"], PeriodArray)
class PeriodIndex(DatetimeIndexOpsMixin):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
    Index keys are boxed to Period objects which carry the metadata (e.g.,
frequency information).
Parameters
----------
data : array-like (1d int np.ndarray or PeriodArray), optional
Optional period-like data to construct index with.
copy : bool
Make a copy of input ndarray.
freq : str or period object, optional
One of pandas period strings or corresponding objects.
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
day_of_week
dayofyear
day_of_year
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
See Also
--------
Index : The base pandas Index type.
Period : Represents a period of time.
DatetimeIndex : Index with datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
period_range : Create a fixed-frequency PeriodIndex.
Examples
--------
>>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])
>>> idx
PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')
"""
_typ = "periodindex"
_attributes = ["name"]
_data: PeriodArray
freq: BaseOffset
_data_cls = PeriodArray
_engine_type = libindex.PeriodEngine
_supports_partial_string_indexing = True
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in Index
# These are defined here instead of via inherit_names for mypy
@doc(
PeriodArray.asfreq,
other="pandas.arrays.PeriodArray",
other_name="PeriodArray",
**_shared_doc_kwargs,
)
def asfreq(self, freq=None, how: str = "E") -> PeriodIndex:
arr = self._data.asfreq(freq, how)
return type(self)._simple_new(arr, name=self.name)
@doc(PeriodArray.to_timestamp)
def to_timestamp(self, freq=None, how="start") -> DatetimeIndex:
arr = self._data.to_timestamp(freq, how)
return DatetimeIndex._simple_new(arr, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.hour.fget)
def hour(self) -> Int64Index:
return Int64Index(self._data.hour, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.minute.fget)
def minute(self) -> Int64Index:
return Int64Index(self._data.minute, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.second.fget)
def second(self) -> Int64Index:
return Int64Index(self._data.second, name=self.name)
# ------------------------------------------------------------------------
# Index Constructors
def __new__(
cls,
data=None,
ordinal=None,
freq=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
**fields,
) -> PeriodIndex:
valid_field_set = {
"year",
"month",
"day",
"quarter",
"hour",
"minute",
"second",
}
if not set(fields).issubset(valid_field_set):
argument = list(set(fields) - valid_field_set)[0]
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
name = maybe_extract_name(name, data, cls)
if data is None and ordinal is None:
# range-based.
data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)
            # PeriodArray._generate_range does validation that fields is
# empty when really using the range-based constructor.
freq = freq2
data = PeriodArray(data, freq=freq)
else:
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
if data is None and ordinal is not None:
# we strangely ignore `ordinal` if data is passed.
ordinal = np.asarray(ordinal, dtype=np.int64)
data = PeriodArray(ordinal, freq=freq)
else:
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
# ------------------------------------------------------------------------
# Data
@property
def values(self) -> np.ndarray:
return np.asarray(self, dtype=object)
def _maybe_convert_timedelta(self, other):
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
if isinstance(self.freq, Tick):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, BaseOffset):
if other.base == self.freq.base:
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
raise raise_on_incompatible(self, None)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if not isinstance(dtype, PeriodDtype):
return False
return dtype.freq == self.freq
# ------------------------------------------------------------------------
# Index Methods
def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray:
"""
where : array of timestamps
mask : np.ndarray[bool]
Array of booleans where data is not NA.
"""
if isinstance(where, DatetimeIndex):
where = PeriodIndex(where._values, freq=self.freq)
elif not isinstance(where, PeriodIndex):
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
return super().asof_locs(where, mask)
@doc(Index.astype)
def astype(self, dtype, copy: bool = True, how=lib.no_default):
dtype = pandas_dtype(dtype)
if how is not lib.no_default:
# GH#37982
warnings.warn(
"The 'how' keyword in PeriodIndex.astype is deprecated and "
"will be removed in a future version. "
"Use index.to_timestamp(how=how) instead",
FutureWarning,
stacklevel=2,
)
else:
how = "start"
if is_datetime64_any_dtype(dtype):
# 'how' is index-specific, isn't part of the EA interface.
tz = getattr(dtype, "tz", None)
return self.to_timestamp(how=how).tz_localize(tz)
return super().astype(dtype, copy=copy)
@property
def is_full(self) -> bool:
"""
Returns True if this PeriodIndex is range-like in that all Periods
between start and end are present, in order.
"""
if len(self) == 0:
return True
if not self.is_monotonic_increasing:
raise ValueError("Index is not monotonic")
values = self.asi8
return ((values[1:] - values[:-1]) < 2).all()
@property
def inferred_type(self) -> str:
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return "period"
# ------------------------------------------------------------------------
# Indexing Methods
def _convert_tolerance(self, tolerance, target):
# Returned tolerance must be in dtype/units so that
# `|self._get_engine_target() - target._engine_target()| <= tolerance`
# is meaningful. Since PeriodIndex returns int64 for engine_target,
# we may need to convert timedelta64 tolerance to int64.
tolerance = super()._convert_tolerance(tolerance, target)
if self.dtype == target.dtype:
# convert tolerance to i8
tolerance = self._maybe_convert_timedelta(tolerance)
return tolerance
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parsable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable.
"""
orig_key = key
if not is_scalar(key):
raise InvalidIndexError(key)
if is_valid_na_for_dtype(key, self.dtype):
key = NaT
elif isinstance(key, str):
try:
loc = self._get_string_slice(key)
return loc
except (TypeError, ValueError):
pass
try:
asdt, reso_str = parse_time_string(key, self.freq)
except (ValueError, DateParseError) as err:
# A string with invalid format
raise KeyError(f"Cannot interpret '{key}' as period") from err
reso = Resolution.from_attrname(reso_str)
grp = reso.freq_group.value
freqn = self.dtype.freq_group_code
# _get_string_slice will handle cases where grp < freqn
assert grp >= freqn
# BusinessDay is a bit strange. It has a *lower* code, but we never parse
# a string as "BusinessDay" resolution, just Day.
if grp == freqn or (
reso == Resolution.RESO_DAY and self.dtype.freq.name == "B"
):
key = Period(asdt, freq=self.freq)
loc = self.get_loc(key, method=method, tolerance=tolerance)
return loc
elif method is None:
raise KeyError(key)
else:
key = asdt
elif isinstance(key, Period):
sfreq = self.freq
kfreq = key.freq
if not (
sfreq.n == kfreq.n
and sfreq._period_dtype_code == kfreq._period_dtype_code
):
# GH#42247 For the subset of DateOffsets that can be Period freqs,
# checking these two attributes is sufficient to check equality,
# and much more performant than `self.freq == key.freq`
raise KeyError(key)
elif isinstance(key, datetime):
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
else:
# in particular integer, which Period constructor would cast to string
raise KeyError(key)
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
try:
return Index.get_loc(self, key, method, tolerance)
except KeyError as err:
raise KeyError(orig_key) from err
# MASKED: _maybe_cast_slice_bound function (lines 487-525)
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
grp = reso.freq_group
iv = Period(parsed, freq=grp.value)
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
def _validate_partial_date_slice(self, reso: Resolution):
assert isinstance(reso, Resolution), (type(reso), reso)
grp = reso.freq_group
freqn = self.dtype.freq_group_code
if not grp.value < freqn:
# TODO: we used to also check for
# reso in ["day", "hour", "minute", "second"]
# why is that check not needed?
raise ValueError
def _get_string_slice(self, key: str):
parsed, reso_str = parse_time_string(key, self.freq)
reso = Resolution.from_attrname(reso_str)
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
def period_range(
start=None, end=None, periods: int | None = None, freq=None, name=None
) -> PeriodIndex:
"""
Return a fixed frequency PeriodIndex.
The day (calendar) is the default frequency.
Parameters
----------
start : str or period-like, default None
Left bound for generating periods.
end : str or period-like, default None
Right bound for generating periods.
periods : int, default None
Number of periods to generate.
freq : str or DateOffset, optional
Frequency alias. By default the freq is taken from `start` or `end`
if those are Period objects. Otherwise, the default is ``"D"`` for
daily frequency.
name : str, default None
Name of the resulting PeriodIndex.
Returns
-------
PeriodIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',
'2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',
'2018-01'],
dtype='period[M]')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
... end=pd.Period('2017Q2', freq='Q'), freq='M')
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]')
"""
if com.count_not_none(start, end, periods) != 2:
raise ValueError(
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):
freq = "D"
data, freq = PeriodArray._generate_range(start, end, periods, freq, fields={})
data = PeriodArray(data, freq=freq)
return PeriodIndex(data, name=name)
|
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'}, or None
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ["loc", "getitem", None, lib.no_default]
self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound")
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, str):
try:
parsed, reso_str = parse_time_string(label, self.freq)
except ValueError as err:
# string cannot be parsed as datetime-like
raise self._invalid_indexer("slice", label) from err
reso = Resolution.from_attrname(reso_str)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
return lower if side == "left" else upper
elif not isinstance(label, self._data._recognized_scalars):
raise self._invalid_indexer("slice", label)
return label
| 487 | 525 |
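# --- Illustrative usage (added; not part of the dataset row above) ------------
# _maybe_cast_slice_bound is what lets string bounds be used when slicing a
# Period-indexed object. A minimal sketch, assuming pandas 1.x behaviour:
import pandas as pd

s = pd.Series(range(12), index=pd.period_range("2020-01", periods=12, freq="M"))
print(s["2020-03":"2020-05"])  # bounds cast to monthly Periods: March..May, inclusive
print(s.loc["2020"])           # a coarser (yearly) string selects all twelve rows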
from __future__ import annotations
from datetime import (
datetime,
timedelta,
)
from typing import Hashable
import warnings
import numpy as np
from pandas._libs import (
index as libindex,
lib,
)
from pandas._libs.tslibs import (
BaseOffset,
NaT,
Period,
Resolution,
Tick,
)
from pandas._libs.tslibs.parsing import (
DateParseError,
parse_time_string,
)
from pandas._typing import (
Dtype,
DtypeObj,
)
from pandas.errors import InvalidIndexError
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
is_datetime64_any_dtype,
is_integer,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.missing import is_valid_na_for_dtype
from pandas.core.arrays.period import (
PeriodArray,
period_array,
raise_on_incompatible,
validate_dtype_freq,
)
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.indexes.datetimes import (
DatetimeIndex,
Index,
)
from pandas.core.indexes.extension import inherit_names
from pandas.core.indexes.numeric import Int64Index
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"})
_shared_doc_kwargs = {
"klass": "PeriodArray",
}
# --- Period index sketch
def _new_PeriodIndex(cls, **d):
# GH13277 for unpickling
values = d.pop("data")
if values.dtype == "int64":
freq = d.pop("freq", None)
values = PeriodArray(values, freq=freq)
return cls._simple_new(values, **d)
else:
return cls(values, **d)
@inherit_names(
["strftime", "start_time", "end_time"] + PeriodArray._field_ops,
PeriodArray,
wrap=True,
)
@inherit_names(["is_leap_year", "_format_native_types"], PeriodArray)
class PeriodIndex(DatetimeIndexOpsMixin):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
    Index keys are boxed to Period objects which carry the metadata (e.g.,
frequency information).
Parameters
----------
data : array-like (1d int np.ndarray or PeriodArray), optional
Optional period-like data to construct index with.
copy : bool
Make a copy of input ndarray.
freq : str or period object, optional
One of pandas period strings or corresponding objects.
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
day_of_week
dayofyear
day_of_year
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
See Also
--------
Index : The base pandas Index type.
Period : Represents a period of time.
DatetimeIndex : Index with datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
period_range : Create a fixed-frequency PeriodIndex.
Examples
--------
>>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])
>>> idx
PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')
"""
_typ = "periodindex"
_attributes = ["name"]
_data: PeriodArray
freq: BaseOffset
_data_cls = PeriodArray
_engine_type = libindex.PeriodEngine
_supports_partial_string_indexing = True
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in Index
# These are defined here instead of via inherit_names for mypy
@doc(
PeriodArray.asfreq,
other="pandas.arrays.PeriodArray",
other_name="PeriodArray",
**_shared_doc_kwargs,
)
def asfreq(self, freq=None, how: str = "E") -> PeriodIndex:
arr = self._data.asfreq(freq, how)
return type(self)._simple_new(arr, name=self.name)
@doc(PeriodArray.to_timestamp)
def to_timestamp(self, freq=None, how="start") -> DatetimeIndex:
arr = self._data.to_timestamp(freq, how)
return DatetimeIndex._simple_new(arr, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.hour.fget)
def hour(self) -> Int64Index:
return Int64Index(self._data.hour, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.minute.fget)
def minute(self) -> Int64Index:
return Int64Index(self._data.minute, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.second.fget)
def second(self) -> Int64Index:
return Int64Index(self._data.second, name=self.name)
# ------------------------------------------------------------------------
# Index Constructors
def __new__(
cls,
data=None,
ordinal=None,
freq=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
**fields,
) -> PeriodIndex:
valid_field_set = {
"year",
"month",
"day",
"quarter",
"hour",
"minute",
"second",
}
if not set(fields).issubset(valid_field_set):
argument = list(set(fields) - valid_field_set)[0]
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
name = maybe_extract_name(name, data, cls)
if data is None and ordinal is None:
# range-based.
data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)
            # PeriodArray._generate_range does validation that fields is
# empty when really using the range-based constructor.
freq = freq2
data = PeriodArray(data, freq=freq)
else:
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
if data is None and ordinal is not None:
# we strangely ignore `ordinal` if data is passed.
ordinal = np.asarray(ordinal, dtype=np.int64)
data = PeriodArray(ordinal, freq=freq)
else:
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
# ------------------------------------------------------------------------
# Data
@property
def values(self) -> np.ndarray:
return np.asarray(self, dtype=object)
def _maybe_convert_timedelta(self, other):
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
if isinstance(self.freq, Tick):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, BaseOffset):
if other.base == self.freq.base:
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
raise raise_on_incompatible(self, None)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if not isinstance(dtype, PeriodDtype):
return False
return dtype.freq == self.freq
# ------------------------------------------------------------------------
# Index Methods
def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray:
"""
where : array of timestamps
mask : np.ndarray[bool]
Array of booleans where data is not NA.
"""
if isinstance(where, DatetimeIndex):
where = PeriodIndex(where._values, freq=self.freq)
elif not isinstance(where, PeriodIndex):
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
return super().asof_locs(where, mask)
@doc(Index.astype)
def astype(self, dtype, copy: bool = True, how=lib.no_default):
dtype = pandas_dtype(dtype)
if how is not lib.no_default:
# GH#37982
warnings.warn(
"The 'how' keyword in PeriodIndex.astype is deprecated and "
"will be removed in a future version. "
"Use index.to_timestamp(how=how) instead",
FutureWarning,
stacklevel=2,
)
else:
how = "start"
if is_datetime64_any_dtype(dtype):
# 'how' is index-specific, isn't part of the EA interface.
tz = getattr(dtype, "tz", None)
return self.to_timestamp(how=how).tz_localize(tz)
return super().astype(dtype, copy=copy)
@property
def is_full(self) -> bool:
"""
Returns True if this PeriodIndex is range-like in that all Periods
between start and end are present, in order.
"""
if len(self) == 0:
return True
if not self.is_monotonic_increasing:
raise ValueError("Index is not monotonic")
values = self.asi8
return ((values[1:] - values[:-1]) < 2).all()
@property
def inferred_type(self) -> str:
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return "period"
# ------------------------------------------------------------------------
# Indexing Methods
def _convert_tolerance(self, tolerance, target):
# Returned tolerance must be in dtype/units so that
# `|self._get_engine_target() - target._engine_target()| <= tolerance`
# is meaningful. Since PeriodIndex returns int64 for engine_target,
# we may need to convert timedelta64 tolerance to int64.
tolerance = super()._convert_tolerance(tolerance, target)
if self.dtype == target.dtype:
# convert tolerance to i8
tolerance = self._maybe_convert_timedelta(tolerance)
return tolerance
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parsable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable.
"""
orig_key = key
if not is_scalar(key):
raise InvalidIndexError(key)
if is_valid_na_for_dtype(key, self.dtype):
key = NaT
elif isinstance(key, str):
try:
loc = self._get_string_slice(key)
return loc
except (TypeError, ValueError):
pass
try:
asdt, reso_str = parse_time_string(key, self.freq)
except (ValueError, DateParseError) as err:
# A string with invalid format
raise KeyError(f"Cannot interpret '{key}' as period") from err
reso = Resolution.from_attrname(reso_str)
grp = reso.freq_group.value
freqn = self.dtype.freq_group_code
# _get_string_slice will handle cases where grp < freqn
assert grp >= freqn
# BusinessDay is a bit strange. It has a *lower* code, but we never parse
# a string as "BusinessDay" resolution, just Day.
if grp == freqn or (
reso == Resolution.RESO_DAY and self.dtype.freq.name == "B"
):
key = Period(asdt, freq=self.freq)
loc = self.get_loc(key, method=method, tolerance=tolerance)
return loc
elif method is None:
raise KeyError(key)
else:
key = asdt
elif isinstance(key, Period):
sfreq = self.freq
kfreq = key.freq
if not (
sfreq.n == kfreq.n
and sfreq._period_dtype_code == kfreq._period_dtype_code
):
# GH#42247 For the subset of DateOffsets that can be Period freqs,
# checking these two attributes is sufficient to check equality,
# and much more performant than `self.freq == key.freq`
raise KeyError(key)
elif isinstance(key, datetime):
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
else:
# in particular integer, which Period constructor would cast to string
raise KeyError(key)
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
try:
return Index.get_loc(self, key, method, tolerance)
except KeyError as err:
raise KeyError(orig_key) from err
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'}, or None
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ["loc", "getitem", None, lib.no_default]
self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound")
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, str):
try:
parsed, reso_str = parse_time_string(label, self.freq)
except ValueError as err:
# string cannot be parsed as datetime-like
raise self._invalid_indexer("slice", label) from err
reso = Resolution.from_attrname(reso_str)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
return lower if side == "left" else upper
elif not isinstance(label, self._data._recognized_scalars):
raise self._invalid_indexer("slice", label)
return label
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
grp = reso.freq_group
iv = Period(parsed, freq=grp.value)
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
def _validate_partial_date_slice(self, reso: Resolution):
assert isinstance(reso, Resolution), (type(reso), reso)
grp = reso.freq_group
freqn = self.dtype.freq_group_code
if not grp.value < freqn:
# TODO: we used to also check for
# reso in ["day", "hour", "minute", "second"]
# why is that check not needed?
raise ValueError
def _get_string_slice(self, key: str):
parsed, reso_str = parse_time_string(key, self.freq)
reso = Resolution.from_attrname(reso_str)
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
def period_range(
start=None, end=None, periods: int | None = None, freq=None, name=None
) -> PeriodIndex:
"""
Return a fixed frequency PeriodIndex.
The day (calendar) is the default frequency.
Parameters
----------
start : str or period-like, default None
Left bound for generating periods.
end : str or period-like, default None
Right bound for generating periods.
periods : int, default None
Number of periods to generate.
freq : str or DateOffset, optional
Frequency alias. By default the freq is taken from `start` or `end`
if those are Period objects. Otherwise, the default is ``"D"`` for
daily frequency.
name : str, default None
Name of the resulting PeriodIndex.
Returns
-------
PeriodIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',
'2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',
'2018-01'],
dtype='period[M]')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
... end=pd.Period('2017Q2', freq='Q'), freq='M')
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]')
"""
if com.count_not_none(start, end, periods) != 2:
raise ValueError(
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):
freq = "D"
data, freq = PeriodArray._generate_range(start, end, periods, freq, fields={})
data = PeriodArray(data, freq=freq)
return PeriodIndex(data, name=name)
|
inference_graph
|
Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph.
|
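# --- Hypothetical usage sketch (added; not part of the dataset row above) -----
# Based only on the docstring above: inference_graph takes input data (plus an
# optional data_spec) and returns the last op of the forest inference graph.
# The module path, placeholder shape and TF 1.x-style call below are
# assumptions for illustration, not a documented API surface.
import tensorflow as tf
from tensorflow.contrib.tensor_forest.python import tensor_forest

params = tensor_forest.ForestHParams(num_classes=2, num_features=40).fill()
graph_builder = tensor_forest.RandomForestGraphs(params, training=False)
x = tf.placeholder(tf.float32, shape=[None, 40])
predictions = graph_builder.inference_graph(x)  # last op of the inference graph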
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0,
max_depth=0, num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1, **kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.max_depth = max_depth
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Allow each tree to be unbalanced by up to a factor of 2.
self.max_depth = (self.max_depth or
int(2 * math.ceil(math.log(self.max_nodes, 2))))
# The Random Forest literature recommends sqrt(# features) for
# classification problems, and p/3 for regression problems.
# TODO(thomaswc): Consider capping this for large number of features.
self.num_splits_to_consider = (
self.num_splits_to_consider or
max(10, int(math.ceil(math.sqrt(self.num_features)))))
    # max_fertile_nodes doesn't affect performance, only training speed.
# We therefore set it primarily based upon space considerations.
# Each fertile node takes up num_splits_to_consider times as much
    # space as a non-fertile node. We want the fertile nodes to in
# total only take up as much space as the non-fertile nodes, so
num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
# But always use at least 1000 accumulate slots.
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
# But it also never needs to be larger than the number of leaves,
# which is max_nodes / 2.
self.max_fertile_nodes = min(self.max_fertile_nodes,
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
num_split_initializiations_per_input = max(1, int(math.floor(
self.num_splits_to_consider / self.split_after_samples)))
self.split_initializations_per_input = getattr(
self, 'split_initializations_per_input',
num_split_initializiations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
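# --- Worked example (added; not part of the original TensorFlow source) -------
# Tracing the default formulas in fill() above for num_features=40 and
# max_nodes=10000 (values borrowed from the usage comment near the top):
#
#   max_depth              = int(2 * ceil(log2(10000)))         = 2 * 14 = 28
#   num_splits_to_consider = max(10, ceil(sqrt(40)))             = max(10, 7) = 10
#   max_fertile_nodes      = min(max(ceil(10000 / 10), 1000),
#                                ceil(10000 / 2.0))              = 1000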
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.tree_depths = variable_scope.get_variable(
name=self.get_tree_name('tree_depths', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(1))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = tf_variables.Variable(
[0] * (params.max_nodes), name='start_epoch')
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with ops.device(device_assigner.get_device(i)):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = constant_op.constant(0)
self.cached = dummy.device
return self.cached
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None,
variables=None, tree_variables_class=TreeTrainingVariables,
tree_graphs=None, training=True,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(), i_ops.Load(), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(1, self.params.num_features, input_data)
return array_ops.concat(
1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
def training_graph(self, input_data, input_labels, data_spec=None,
epoch=None, **tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
# TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(
self.trees[i].training_graph(
tree_data, tree_labels, seed, data_spec=data_spec,
epoch=([0] if epoch is None else epoch),
**tree_kwargs))
return control_flow_ops.group(*tree_graphs)
# MASKED: inference_graph function (lines 389-412)
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes))
def training_loss(self):
return math_ops.neg(self.average_size())
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.neg(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [1, 1])), -2),
_init_tree, _nothing)
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
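# Illustrative note (a worked example, not part of the original code): for a
# single row of per-class counts [total, c1, c2] = [4., 3., 1.], column 0 (the
# example count) is dropped and the remaining counts are smoothed to [4., 2.],
# so sums = 6, sum_squares = 16 + 4 = 20 and gini = 1 - 20 / 36 ~= 0.444.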
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
def training_graph(self, input_data, input_labels, random_seed,
data_spec, epoch=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
Returns:
The last op in the random tree training graph.
"""
epoch = [0] if epoch is None else epoch
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums,
splits_squares, totals_indices, totals_sums,
totals_squares, input_leaves) = (
self.training_ops.count_extremely_random_stats(
input_data, sparse_indices, sparse_values, sparse_shape,
data_spec, input_labels, self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch, epoch,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data, sparse_indices, sparse_values, sparse_shape,
self.variables.node_to_accumulator_map,
input_leaves, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
finished, stale = self.training_ops.finished_nodes(
leaves, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch, epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples)
# Update leaf scores.
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates,
tree_threshold_updates, tree_depth_updates, new_eot) = (
self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.node_to_accumulator_map, finished, split_indices,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
depth_update_op = state_ops.scatter_update(
self.variables.tree_depths, tree_update_indices, tree_depth_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_depth_updates)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = (
self.training_ops.update_fertile_slots(
finished, non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
max_depth=self.params.max_depth,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = control_flow_ops.tuple([new_eot],
control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(state_ops.scatter_update(
self.variables.node_to_accumulator_map,
array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, -1]),
squeeze_dims=[0]),
array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, -1]),
squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(
0, [accumulators_cleared, accumulators_allocated])
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
# reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
return self.inference_ops.tree_predictions(
input_data, sparse_indices, sparse_values, sparse_shape, data_spec,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
|
def inference_graph(self, input_data, data_spec=None):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data,
data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return math_ops.reduce_sum(all_predict, 0) / self.params.num_trees
| 389 | 412 |
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0,
max_depth=0, num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1, **kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.max_depth = max_depth
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
# classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Allow each tree to be unbalanced by up to a factor of 2.
self.max_depth = (self.max_depth or
int(2 * math.ceil(math.log(self.max_nodes, 2))))
# The Random Forest literature recommends sqrt(# features) for
# classification problems, and p/3 for regression problems.
# TODO(thomaswc): Consider capping this for large number of features.
self.num_splits_to_consider = (
self.num_splits_to_consider or
max(10, int(math.ceil(math.sqrt(self.num_features)))))
# max_fertile_nodes doesn't affect performance, only training speed.
# We therefore set it primarily based upon space considerations.
# Each fertile node takes up num_splits_to_consider times as much
# space as a non-fertile node. We want the fertile nodes in
# total to take up only as much space as the non-fertile nodes, so
num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
# But always use at least 1000 accumulate slots.
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
# But it also never needs to be larger than the number of leaves,
# which is max_nodes / 2.
self.max_fertile_nodes = min(self.max_fertile_nodes,
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
num_split_initializiations_per_input = max(1, int(math.floor(
self.num_splits_to_consider / self.split_after_samples)))
self.split_initializations_per_input = getattr(
self, 'split_initializations_per_input',
num_split_initializiations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
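# Illustrative sketch (hypothetical usage, not in the original module): with
# only the required values set, fill() derives the remaining defaults, e.g.
#   hparams = ForestHParams(num_classes=2, num_features=40).fill()
# yields num_output_columns == 3 (classes plus a count column),
# num_splits_to_consider == max(10, ceil(sqrt(40))) == 10,
# max_depth == 2 * ceil(log2(10000)) == 28, and
# max_fertile_nodes == min(max(ceil(10000 / 10), 1000), 5000) == 1000.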
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.tree_depths = variable_scope.get_variable(
name=self.get_tree_name('tree_depths', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(1))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = tf_variables.Variable(
[0] * (params.max_nodes), name='start_epoch')
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
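# Interpretation of the initializers above (hedged, not original
# documentation): -2 in the tree variable marks nodes that do not exist yet
# (tree_initialization() later turns node 0 into a leaf), -1 entries in
# node_to_accumulator_map mark non-fertile nodes, and -1.0 rows in
# accumulator_sums appear to mark accumulator slots that are unallocated.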
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with ops.device(device_assigner.get_device(i)):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = constant_op.constant(0)
self.cached = dummy.device
return self.cached
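# Hedged sketch (hypothetical subclass, not part of this module) of how
# get_device can be overridden to spread whole trees round-robin across an
# explicit device list:
#
#   class RoundRobinDeviceAssigner(RandomForestDeviceAssigner):
#     def __init__(self, devices):
#       super(RoundRobinDeviceAssigner, self).__init__()
#       self.devices = devices
#
#     def get_device(self, tree_num):
#       return self.devices[tree_num % len(self.devices)]
#
#   assigner = RoundRobinDeviceAssigner(['/gpu:0', '/gpu:1'])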
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None,
variables=None, tree_variables_class=TreeTrainingVariables,
tree_graphs=None, training=True,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(), i_ops.Load(), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(1, self.params.num_features, input_data)
return array_ops.concat(
1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
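# Illustrative note (hypothetical values, not in the original code): with
# num_features == 4 and bagged_features[tree_num] == [2, 0], the input is
# split into its four columns and re-concatenated as [column 2, column 0],
# so each tree only ever sees its own bagged subset of the feature columns.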
def training_graph(self, input_data, input_labels, data_spec=None,
epoch=None, **tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
# TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(
self.trees[i].training_graph(
tree_data, tree_labels, seed, data_spec=data_spec,
epoch=([0] if epoch is None else epoch),
**tree_kwargs))
return control_flow_ops.group(*tree_graphs)
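# Hedged usage sketch (placeholder names are made up for illustration; the
# session setup follows the TF API of this era):
#
#   graphs = RandomForestGraphs(hparams)
#   train_op = graphs.training_graph(x_placeholder, y_placeholder)
#   loss_op = graphs.training_loss()
#   with tf.Session() as sess:
#     sess.run(tf.initialize_all_variables())
#     for batch_x, batch_y in batches:
#       sess.run(train_op, feed_dict={x_placeholder: batch_x,
#                                     y_placeholder: batch_y})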
def inference_graph(self, input_data, data_spec=None):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data,
data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return math_ops.reduce_sum(all_predict, 0) / self.params.num_trees
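# Illustrative note: each tree's inference_graph returns a per-example
# prediction tensor of the same shape, pack() stacks them along a new leading
# num_trees axis, and dividing the reduce_sum by num_trees yields an
# unweighted average of the per-tree probabilities.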
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes))
def training_loss(self):
return math_ops.neg(self.average_size())
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.neg(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [1, 1])), -2),
_init_tree, _nothing)
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
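# Illustrative note (a worked example, not part of the original code): for
# per-class counts [total, c1, c2] = [4., 3., 1.], the smoothed class counts
# are [4., 2.], so sums = 6, sum_squares = 20 and the weighted score is
# 6 - 20 / 6 ~= 2.67, i.e. the plain Gini impurity of ~0.444 scaled by the
# six smoothed examples in the node.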
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
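# Illustrative note (a worked example, not part of the original code): for a
# regression node that has seen outputs {2, 4}, sums = [[2., 6.]] and
# squares = [[2., 20.]] (count column replicated), so total_count = 2,
# E[x] = 3, E[x^2] = 10, the count column cancels, and the variance is
# 10 - 9 = 1.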
def training_graph(self, input_data, input_labels, random_seed,
data_spec, epoch=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
Returns:
The last op in the random tree training graph.
"""
epoch = [0] if epoch is None else epoch
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums,
splits_squares, totals_indices, totals_sums,
totals_squares, input_leaves) = (
self.training_ops.count_extremely_random_stats(
input_data, sparse_indices, sparse_values, sparse_shape,
data_spec, input_labels, self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch, epoch,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data, sparse_indices, sparse_values, sparse_shape,
self.variables.node_to_accumulator_map,
input_leaves, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
finished, stale = self.training_ops.finished_nodes(
leaves, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch, epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples)
# Update leaf scores.
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates,
tree_threshold_updates, tree_depth_updates, new_eot) = (
self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.node_to_accumulator_map, finished, split_indices,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
depth_update_op = state_ops.scatter_update(
self.variables.tree_depths, tree_update_indices, tree_depth_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_depth_updates)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = (
self.training_ops.update_fertile_slots(
finished, non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
max_depth=self.params.max_depth,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = control_flow_ops.tuple([new_eot],
control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(state_ops.scatter_update(
self.variables.node_to_accumulator_map,
array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, -1]),
squeeze_dims=[0]),
array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, -1]),
squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(
0, [accumulators_cleared, accumulators_allocated])
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
# reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
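# Summary of the update pipeline above (a reading of the graph, not original
# documentation): (1) count per-node and per-candidate-split statistics,
# (2) sample new candidate split features/thresholds, (3) decide which
# fertile leaves are finished, (4) pick their best splits, (5) grow the tree
# with the new children, and (6) update the fertile-slot bookkeeping and
# reset the accumulators that were cleared or newly allocated; the
# control_dependencies calls enforce this ordering within a single run.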
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
return self.inference_ops.tree_predictions(
input_data, sparse_indices, sparse_values, sparse_shape, data_spec,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
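# Hedged usage sketch (hypothetical names, not part of the original module):
# tree-level stats are usually aggregated through the forest object, e.g.
#
#   stats = graphs.get_stats(sess)            # a ForestStats instance
#   avg_nodes = stats.get_average('num_nodes')
#   avg_leaves = stats.get_average('num_leaves')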
|
average_size
|
Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0,
max_depth=0, num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1, **kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.max_depth = max_depth
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
# classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Allow each tree to be unbalanced by up to a factor of 2.
self.max_depth = (self.max_depth or
int(2 * math.ceil(math.log(self.max_nodes, 2))))
# The Random Forest literature recommends sqrt(# features) for
# classification problems, and p/3 for regression problems.
# TODO(thomaswc): Consider capping this for large number of features.
self.num_splits_to_consider = (
self.num_splits_to_consider or
max(10, int(math.ceil(math.sqrt(self.num_features)))))
# max_fertile_nodes doesn't affect performance, only training speed.
# We therefore set it primarily based upon space considerations.
# Each fertile node takes up num_splits_to_consider times as much
# space as a non-fertile node. We want the fertile nodes in
# total to take up only as much space as the non-fertile nodes, so
num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
# But always use at least 1000 accumulate slots.
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
# But it also never needs to be larger than the number of leaves,
# which is max_nodes / 2.
self.max_fertile_nodes = min(self.max_fertile_nodes,
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
num_split_initializiations_per_input = max(1, int(math.floor(
self.num_splits_to_consider / self.split_after_samples)))
self.split_initializations_per_input = getattr(
self, 'split_initializations_per_input',
num_split_initializiations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.tree_depths = variable_scope.get_variable(
name=self.get_tree_name('tree_depths', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(1))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = tf_variables.Variable(
[0] * (params.max_nodes), name='start_epoch')
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
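    # Example: for a 3-tree forest whose TreeStats report 5, 7 and 9 nodes,
    # get_average('num_nodes') is (5 + 7 + 9) / 3 = 7.0.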
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with ops.device(device_assigner.get_device(i)):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = constant_op.constant(0)
self.cached = dummy.device
return self.cached
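# A minimal sketch of a custom assigner (not part of the original API, purely
# illustrative): it round-robins whole trees across a fixed list of device
# strings, e.g. _RoundRobinDeviceAssigner(['/gpu:0', '/gpu:1']).
class _RoundRobinDeviceAssigner(object):
  """Example assigner that spreads trees across the given devices."""

  def __init__(self, devices):
    self.devices = devices

  def get_device(self, tree_num):
    return self.devices[tree_num % len(self.devices)]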
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None,
variables=None, tree_variables_class=TreeTrainingVariables,
tree_graphs=None, training=True,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(), i_ops.Load(), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(1, self.params.num_features, input_data)
return array_ops.concat(
1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
def training_graph(self, input_data, input_labels, data_spec=None,
epoch=None, **tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
          # TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(
self.trees[i].training_graph(
tree_data, tree_labels, seed, data_spec=data_spec,
epoch=([0] if epoch is None else epoch),
**tree_kwargs))
return control_flow_ops.group(*tree_graphs)
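  # Typical wiring (a sketch; the placeholder shapes are assumptions, not part
  # of this file):
  #   x = array_ops.placeholder(dtypes.float32, [None, params.num_features])
  #   y = array_ops.placeholder(dtypes.float32, [None])
  #   forest = RandomForestGraphs(params)
  #   train_op = forest.training_graph(x, y)
  #   probabilities = forest.inference_graph(x)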
def inference_graph(self, input_data, data_spec=None):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data,
data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return math_ops.reduce_sum(all_predict, 0) / self.params.num_trees
# MASKED: average_size function (lines 414-424)
def training_loss(self):
return math_ops.neg(self.average_size())
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.neg(self.average_size())
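  # Example: if the three trees currently hold 101, 99 and 103 nodes,
  # average_size() evaluates to 101 and both losses to -101, so a decreasing
  # "loss" simply means the forest is still growing.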
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [1, 1])), -2),
_init_tree, _nothing)
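    # The tree tensor is created filled with -2 (see TreeTrainingVariables), so
    # the cond above fires exactly once per tree: it rewrites row 0 to
    # [-1, -1], making the root a single leaf with no children.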
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
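    # Worked example: class counts [3, 1] (the count column having been
    # dropped) smooth to [4, 2], so gini = 1 - (16 + 4) / 36 = 0.444...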
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
      A 1-D tensor of the weighted Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
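    # Worked example: sums = [[4., 10.]] and squares = [[4., 30.]] describe 4
    # examples with sum 10 and sum of squares 30, so E[x] = 2.5, E[x^2] = 7.5
    # and the returned variance is 7.5 - 2.5**2 = 1.25.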
def training_graph(self, input_data, input_labels, random_seed,
data_spec, epoch=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
Returns:
The last op in the random tree training graph.
"""
epoch = [0] if epoch is None else epoch
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums,
splits_squares, totals_indices, totals_sums,
totals_squares, input_leaves) = (
self.training_ops.count_extremely_random_stats(
input_data, sparse_indices, sparse_values, sparse_shape,
data_spec, input_labels, self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch, epoch,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data, sparse_indices, sparse_values, sparse_shape,
self.variables.node_to_accumulator_map,
input_leaves, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
finished, stale = self.training_ops.finished_nodes(
leaves, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch, epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples)
# Update leaf scores.
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates,
tree_threshold_updates, tree_depth_updates, new_eot) = (
self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.node_to_accumulator_map, finished, split_indices,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
depth_update_op = state_ops.scatter_update(
self.variables.tree_depths, tree_update_indices, tree_depth_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_depth_updates)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = (
self.training_ops.update_fertile_slots(
finished, non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
max_depth=self.params.max_depth,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = control_flow_ops.tuple([new_eot],
control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(state_ops.scatter_update(
self.variables.node_to_accumulator_map,
array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, -1]),
squeeze_dims=[0]),
array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, -1]),
squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(
0, [accumulators_cleared, accumulators_allocated])
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
    # reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
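  # The graph built above performs one training step end to end: accumulate
  # node and candidate-split statistics, sample new candidate splits, detect
  # finished fertile nodes, pick their best splits, grow the tree, and recycle
  # the freed accumulator slots for newly fertile leaves.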
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
return self.inference_ops.tree_predictions(
input_data, sparse_indices, sparse_values, sparse_shape, data_spec,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
|
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes))
| 414 | 424 |
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0,
max_depth=0, num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1, **kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.max_depth = max_depth
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
    # Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Allow each tree to be unbalanced by up to a factor of 2.
self.max_depth = (self.max_depth or
int(2 * math.ceil(math.log(self.max_nodes, 2))))
# The Random Forest literature recommends sqrt(# features) for
# classification problems, and p/3 for regression problems.
# TODO(thomaswc): Consider capping this for large number of features.
self.num_splits_to_consider = (
self.num_splits_to_consider or
max(10, int(math.ceil(math.sqrt(self.num_features)))))
    # max_fertile_nodes doesn't affect performance, only training speed.
    # We therefore set it primarily based upon space considerations.
    # Each fertile node takes up num_splits_to_consider times as much
    # space as a non-fertile node. We want the fertile nodes in total to
    # take up only as much space as the non-fertile nodes, so
num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
    # But always use at least 1000 accumulator slots.
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
# But it also never needs to be larger than the number of leaves,
# which is max_nodes / 2.
self.max_fertile_nodes = min(self.max_fertile_nodes,
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
num_split_initializiations_per_input = max(1, int(math.floor(
self.num_splits_to_consider / self.split_after_samples)))
self.split_initializations_per_input = getattr(
self, 'split_initializations_per_input',
num_split_initializiations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.tree_depths = variable_scope.get_variable(
name=self.get_tree_name('tree_depths', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(1))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = tf_variables.Variable(
[0] * (params.max_nodes), name='start_epoch')
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
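    # e.g. get_tree_name('node_sums', 3) returns 'node_sums-3', keeping
    # per-tree variables distinct within a shared variable scope.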
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with ops.device(device_assigner.get_device(i)):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
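    # e.g. forest_variables[3].node_sums is the node_sums variable of tree 3.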
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = constant_op.constant(0)
self.cached = dummy.device
return self.cached
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None,
variables=None, tree_variables_class=TreeTrainingVariables,
tree_graphs=None, training=True,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(), i_ops.Load(), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(1, self.params.num_features, input_data)
return array_ops.concat(
1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
def training_graph(self, input_data, input_labels, data_spec=None,
epoch=None, **tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
          # TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(
self.trees[i].training_graph(
tree_data, tree_labels, seed, data_spec=data_spec,
epoch=([0] if epoch is None else epoch),
**tree_kwargs))
return control_flow_ops.group(*tree_graphs)
def inference_graph(self, input_data, data_spec=None):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data,
data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return math_ops.reduce_sum(all_predict, 0) / self.params.num_trees
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes))
def training_loss(self):
return math_ops.neg(self.average_size())
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.neg(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [1, 1])), -2),
_init_tree, _nothing)
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
      A 1-D tensor of the weighted Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
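    # Worked example: class counts [3, 1] smooth to [4, 2], so the score is
    # 6 - (16 + 4) / 6 = 2.67, i.e. the Gini impurity scaled by the count.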
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
def training_graph(self, input_data, input_labels, random_seed,
data_spec, epoch=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
Returns:
The last op in the random tree training graph.
"""
epoch = [0] if epoch is None else epoch
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums,
splits_squares, totals_indices, totals_sums,
totals_squares, input_leaves) = (
self.training_ops.count_extremely_random_stats(
input_data, sparse_indices, sparse_values, sparse_shape,
data_spec, input_labels, self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch, epoch,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data, sparse_indices, sparse_values, sparse_shape,
self.variables.node_to_accumulator_map,
input_leaves, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
finished, stale = self.training_ops.finished_nodes(
leaves, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch, epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples)
# Update leaf scores.
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates,
tree_threshold_updates, tree_depth_updates, new_eot) = (
self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.node_to_accumulator_map, finished, split_indices,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
depth_update_op = state_ops.scatter_update(
self.variables.tree_depths, tree_update_indices, tree_depth_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_depth_updates)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = (
self.training_ops.update_fertile_slots(
finished, non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
max_depth=self.params.max_depth,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = control_flow_ops.tuple([new_eot],
control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(state_ops.scatter_update(
self.variables.node_to_accumulator_map,
array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, -1]),
squeeze_dims=[0]),
array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, -1]),
squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(
0, [accumulators_cleared, accumulators_allocated])
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
    # reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
return self.inference_ops.tree_predictions(
input_data, sparse_indices, sparse_values, sparse_shape, data_spec,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
|
average_impurity
|
Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0,
max_depth=0, num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1, **kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.max_depth = max_depth
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
# classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Allow each tree to be unbalanced by up to a factor of 2.
self.max_depth = (self.max_depth or
int(2 * math.ceil(math.log(self.max_nodes, 2))))
# The Random Forest literature recommends sqrt(# features) for
# classification problems, and p/3 for regression problems.
# TODO(thomaswc): Consider capping this for large number of features.
self.num_splits_to_consider = (
self.num_splits_to_consider or
max(10, int(math.ceil(math.sqrt(self.num_features)))))
# max_fertile_nodes doesn't affect performance, only training speed.
# We therefore set it primarily based upon space considerations.
# Each fertile node takes up num_splits_to_consider times as much
# space as a non-fertile node. We want the fertile nodes in total to
# take up only as much space as the non-fertile nodes, so
num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
# But always use at least 1000 accumulator slots.
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
# But it also never needs to be larger than the number of leaves,
# which is max_nodes / 2.
self.max_fertile_nodes = min(self.max_fertile_nodes,
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
num_split_initializations_per_input = max(1, int(math.floor(
self.num_splits_to_consider / self.split_after_samples)))
self.split_initializations_per_input = getattr(
self, 'split_initializations_per_input',
num_split_initializations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
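# Illustrative usage sketch (added here, not part of the original module): how
# fill() is typically exercised. The specific values below are assumptions.
def _example_fill_defaults():
  """Builds hyperparameters for a toy 2-class, 40-feature problem."""
  hparams = ForestHParams(num_classes=2, num_features=40).fill()
  # With these inputs fill() derives num_splits_to_consider =
  # max(10, ceil(sqrt(40))) = 10 and max_depth = 2 * ceil(log2(10000)) = 28.
  return hparams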
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.tree_depths = variable_scope.get_variable(
name=self.get_tree_name('tree_depths', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(1))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = tf_variables.Variable(
[0] * (params.max_nodes), name='start_epoch')
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
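# Note (added comment, not in the original source): get_tree_name simply
# suffixes the tree index, e.g. get_tree_name('node_sums', 2) -> 'node_sums-2',
# which keeps each tree's tf.Variables distinct for saving and restoring.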
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with ops.device(device_assigner.get_device(i)):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = constant_op.constant(0)
self.cached = dummy.device
return self.cached
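# Illustrative sketch (not in the original file): the kind of subclass the
# docstring above suggests. The device strings here are placeholder assumptions.
class _RoundRobinDeviceAssigner(RandomForestDeviceAssigner):
  """Spreads whole trees across a fixed list of devices, one tree at a time."""
  def __init__(self, devices=('/cpu:0',)):
    super(_RoundRobinDeviceAssigner, self).__init__()
    self.devices = list(devices)
  def get_device(self, tree_num):
    return self.devices[tree_num % len(self.devices)]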
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None,
variables=None, tree_variables_class=TreeTrainingVariables,
tree_graphs=None, training=True,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(), i_ops.Load(), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(1, self.params.num_features, input_data)
return array_ops.concat(
1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
def training_graph(self, input_data, input_labels, data_spec=None,
epoch=None, **tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
# TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
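# Added note (not in the original source): the mask above keeps each example
# independently with probability bagging_fraction, so e.g. with
# bagging_fraction = 0.6 a batch of 1000 rows yields roughly 600 rows per
# tree, sampled without replacement.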
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(
self.trees[i].training_graph(
tree_data, tree_labels, seed, data_spec=data_spec,
epoch=([0] if epoch is None else epoch),
**tree_kwargs))
return control_flow_ops.group(*tree_graphs)
def inference_graph(self, input_data, data_spec=None):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data,
data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return math_ops.reduce_sum(all_predict, 0) / self.params.num_trees
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes))
def training_loss(self):
return math_ops.neg(self.average_size())
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.neg(self.average_size())
# MASKED: average_impurity function (lines 433-443)
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [1, 1])), -2),
_init_tree, _nothing)
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
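# Worked example (added comment, not in the original source): for a node_sums
# row [4, 2, 2] (total count followed by per-class counts [2, 2]), the slice
# drops the count column and smoothing gives [3, 3], so sums = 6,
# sum_squares = 18 and the Gini impurity is 1 - 18 / 36 = 0.5, the maximum
# possible for two classes.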
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the weighted Gini scores for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
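# Worked example (added comment, not in the original source; it assumes the
# example count is also stored at index 0 of squares, so that column cancels):
# for outputs [1, 2, 3, 4], sums = [4, 10] and squares = [4, 30], giving
# e_x = [1, 2.5], e_x2 = [1, 7.5] and a variance of 7.5 - 2.5**2 = 1.25.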
def training_graph(self, input_data, input_labels, random_seed,
data_spec, epoch=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
Returns:
The last op in the random tree training graph.
"""
epoch = [0] if epoch is None else epoch
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums,
splits_squares, totals_indices, totals_sums,
totals_squares, input_leaves) = (
self.training_ops.count_extremely_random_stats(
input_data, sparse_indices, sparse_values, sparse_shape,
data_spec, input_labels, self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch, epoch,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data, sparse_indices, sparse_values, sparse_shape,
self.variables.node_to_accumulator_map,
input_leaves, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
finished, stale = self.training_ops.finished_nodes(
leaves, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch, epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples)
# Update leaf scores.
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates,
tree_threshold_updates, tree_depth_updates, new_eot) = (
self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.node_to_accumulator_map, finished, split_indices,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
depth_update_op = state_ops.scatter_update(
self.variables.tree_depths, tree_update_indices, tree_depth_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_depth_updates)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = (
self.training_ops.update_fertile_slots(
finished, non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
max_depth=self.params.max_depth,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = control_flow_ops.tuple([new_eot],
control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(state_ops.scatter_update(
self.variables.node_to_accumulator_map,
array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, -1]),
squeeze_dims=[0]),
array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, -1]),
squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(
0, [accumulators_cleared, accumulators_allocated])
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
# reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
return self.inference_ops.tree_predictions(
input_data, sparse_indices, sparse_values, sparse_shape, data_spec,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
|
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities))
| 433 | 443 |
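A minimal sketch (added for illustration, not part of this dataset row) of how the recovered forest-level average_impurity might be wired in alongside the other forest ops; the import path, hyperparameter values, and the idea of monitoring it are assumptions rather than code from the source file.
from tensorflow.contrib.tensor_forest.python import tensor_forest
params = tensor_forest.ForestHParams(
    num_classes=2, num_features=10, num_trees=5, max_nodes=100).fill()
forest = tensor_forest.RandomForestGraphs(params)
impurity_op = forest.average_impurity()  # reduce_mean over per-tree impurity ops
loss_op = forest.training_loss()         # negative average size, as defined above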
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0,
max_depth=0, num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1, **kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.max_depth = max_depth
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
# classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Allow each tree to be unbalanced by up to a factor of 2.
self.max_depth = (self.max_depth or
int(2 * math.ceil(math.log(self.max_nodes, 2))))
# The Random Forest literature recommends sqrt(# features) for
# classification problems, and p/3 for regression problems.
# TODO(thomaswc): Consider capping this for large number of features.
self.num_splits_to_consider = (
self.num_splits_to_consider or
max(10, int(math.ceil(math.sqrt(self.num_features)))))
# max_fertile_nodes doesn't affect performance, only training speed.
# We therefore set it primarily based upon space considerations.
# Each fertile node takes up num_splits_to_consider times as much
# space as a non-fertile node. We want the fertile nodes in total to
# take up only as much space as the non-fertile nodes, so
num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
# But always use at least 1000 accumulator slots.
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
# But it also never needs to be larger than the number of leaves,
# which is max_nodes / 2.
self.max_fertile_nodes = min(self.max_fertile_nodes,
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
num_split_initializations_per_input = max(1, int(math.floor(
self.num_splits_to_consider / self.split_after_samples)))
self.split_initializations_per_input = getattr(
self, 'split_initializations_per_input',
num_split_initializations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.tree_depths = variable_scope.get_variable(
name=self.get_tree_name('tree_depths', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(1))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = tf_variables.Variable(
[0] * (params.max_nodes), name='start_epoch')
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with ops.device(device_assigner.get_device(i)):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = constant_op.constant(0)
self.cached = dummy.device
return self.cached
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None,
variables=None, tree_variables_class=TreeTrainingVariables,
tree_graphs=None, training=True,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(), i_ops.Load(), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(1, self.params.num_features, input_data)
return array_ops.concat(
1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
def training_graph(self, input_data, input_labels, data_spec=None,
epoch=None, **tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
# TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(
self.trees[i].training_graph(
tree_data, tree_labels, seed, data_spec=data_spec,
epoch=([0] if epoch is None else epoch),
**tree_kwargs))
return control_flow_ops.group(*tree_graphs)
def inference_graph(self, input_data, data_spec=None):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data,
data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return math_ops.reduce_sum(all_predict, 0) / self.params.num_trees
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes))
def training_loss(self):
return math_ops.neg(self.average_size())
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.neg(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [1, 1])), -2),
_init_tree, _nothing)
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the weighted Gini scores for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
def training_graph(self, input_data, input_labels, random_seed,
data_spec, epoch=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
Returns:
The last op in the random tree training graph.
"""
epoch = [0] if epoch is None else epoch
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums,
splits_squares, totals_indices, totals_sums,
totals_squares, input_leaves) = (
self.training_ops.count_extremely_random_stats(
input_data, sparse_indices, sparse_values, sparse_shape,
data_spec, input_labels, self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch, epoch,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data, sparse_indices, sparse_values, sparse_shape,
self.variables.node_to_accumulator_map,
input_leaves, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
finished, stale = self.training_ops.finished_nodes(
leaves, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch, epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples)
# Update leaf scores.
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates,
tree_threshold_updates, tree_depth_updates, new_eot) = (
self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.node_to_accumulator_map, finished, split_indices,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
depth_update_op = state_ops.scatter_update(
self.variables.tree_depths, tree_update_indices, tree_depth_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_depth_updates)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = (
self.training_ops.update_fertile_slots(
finished, non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
max_depth=self.params.max_depth,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = control_flow_ops.tuple([new_eot],
control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(state_ops.scatter_update(
self.variables.node_to_accumulator_map,
array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, -1]),
squeeze_dims=[0]),
array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, -1]),
squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(
0, [accumulators_cleared, accumulators_allocated])
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
# reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
return self.inference_ops.tree_predictions(
input_data, sparse_indices, sparse_values, sparse_shape, data_spec,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
|
inference_graph
|
Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0,
max_depth=0, num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1, **kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.max_depth = max_depth
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
# classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Allow each tree to be unbalanced by up to a factor of 2.
self.max_depth = (self.max_depth or
int(2 * math.ceil(math.log(self.max_nodes, 2))))
# The Random Forest literature recommends sqrt(# features) for
# classification problems, and p/3 for regression problems.
# TODO(thomaswc): Consider capping this for large number of features.
self.num_splits_to_consider = (
self.num_splits_to_consider or
max(10, int(math.ceil(math.sqrt(self.num_features)))))
# max_fertile_nodes doesn't affect performance, only training speed.
# We therefore set it primarily based upon space considerations.
# Each fertile node takes up num_splits_to_consider times as much
# space as a non-fertile node. We want the fertile nodes in total to
# take up only as much space as the non-fertile nodes, so
num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
# But always use at least 1000 accumulator slots.
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
# But it also never needs to be larger than the number of leaves,
# which is max_nodes / 2.
self.max_fertile_nodes = min(self.max_fertile_nodes,
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
num_split_initializations_per_input = max(1, int(math.floor(
self.num_splits_to_consider / self.split_after_samples)))
self.split_initializations_per_input = getattr(
self, 'split_initializations_per_input',
num_split_initializations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
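# A minimal usage sketch (illustration only; this helper is not part of the
# module). It simply restates the default-derivation arithmetic in fill()
# above for a hypothetical 2-class, 40-feature problem.
def _example_filled_hparams():
  """Illustrative only: ForestHParams with defaults derived by fill()."""
  hparams = ForestHParams(num_classes=2, num_features=40).fill()
  # With the class defaults above this yields:
  #   num_output_columns = 3        (num_classes + 1)
  #   max_depth = 28                (2 * ceil(log2(10000)))
  #   num_splits_to_consider = 10   (max(10, ceil(sqrt(40))))
  #   max_fertile_nodes = 1000      (ceil(10000 / 10), and at least 1000)
  return hparams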
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.tree_depths = variable_scope.get_variable(
name=self.get_tree_name('tree_depths', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(1))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = tf_variables.Variable(
[0] * (params.max_nodes), name='start_epoch')
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
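# For example (following the format string above), get_tree_name('node_sums', 3)
# yields 'node_sums-3', so each tree's variables get distinct names under
# variable_scope, which is what the class docstring relies on for save/restore.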
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with ops.device(device_assigner.get_device(i)):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = constant_op.constant(0)
self.cached = dummy.device
return self.cached
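# A hedged sketch of the subclassing pattern described above: a hypothetical
# assigner that round-robins whole trees across an explicit device list
# (the device names here are assumptions, not provided by this module).
class _ExampleRoundRobinDeviceAssigner(RandomForestDeviceAssigner):
  """Illustrative only: assigns tree i to devices[i % len(devices)]."""
  def __init__(self, devices=('/cpu:0',)):
    super(_ExampleRoundRobinDeviceAssigner, self).__init__()
    self.devices = devices
  def get_device(self, tree_num):
    return self.devices[tree_num % len(self.devices)]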
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None,
variables=None, tree_variables_class=TreeTrainingVariables,
tree_graphs=None, training=True,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(), i_ops.Load(), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(1, self.params.num_features, input_data)
return array_ops.concat(
1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
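# For example (illustration only): with num_features = 4 and
# bagged_features[tree_num] = [2, 0], the split/concat above keeps only
# columns 2 and 0 (in that order) of input_data for this tree.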
def training_graph(self, input_data, input_labels, data_spec=None,
epoch=None, **tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
# TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(
self.trees[i].training_graph(
tree_data, tree_labels, seed, data_spec=data_spec,
epoch=([0] if epoch is None else epoch),
**tree_kwargs))
return control_flow_ops.group(*tree_graphs)
def inference_graph(self, input_data, data_spec=None):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data,
data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return math_ops.reduce_sum(all_predict, 0) / self.params.num_trees
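# Usage sketch in comments (caller-side code, not part of this class; the
# placeholder shapes/dtypes are assumptions for illustration):
#   params = ForestHParams(num_classes=2, num_features=4).fill()
#   graphs = RandomForestGraphs(params)
#   data = array_ops.placeholder(dtypes.float32, [None, 4])
#   labels = array_ops.placeholder(dtypes.float32, [None])
#   train_op = graphs.training_graph(data, labels)
#   probs = graphs.inference_graph(data)
#   # Run train_op repeatedly with feed_dict batches, then evaluate probs.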
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes))
def training_loss(self):
return math_ops.neg(self.average_size())
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.neg(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [1, 1])), -2),
_init_tree, _nothing)
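# Descriptive note: row 0 still holding the -2 marker from the variable
# initializer means the tree is untouched, so it is reset to [-1, -1]
# (a root with no children); otherwise this is a no-op.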
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
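# Worked example for the two scores above (illustration only): a row of
# class_counts whose per-class entries are [2, 0] becomes [3, 1] after the
# +1 smoothing, so c = 4 and the sum of squares is 10. _gini gives
# 1 - 10/16 = 0.375, while _weighted_gini gives 4 - 10/4 = 1.5, i.e. the
# same impurity scaled by the (smoothed) number of examples.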
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
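# Worked example (illustration only): sums = [[2., 6.]] and
# squares = [[2., 20.]] for a leaf that has seen 2 examples (column 0 holds
# the count). Column 0 contributes 2/2 - (2/2)^2 = 0 and column 1
# contributes 20/2 - (6/2)^2 = 1, so the reduced row variance is 1.0.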
def training_graph(self, input_data, input_labels, random_seed,
data_spec, epoch=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
Returns:
The last op in the random tree training graph.
"""
epoch = [0] if epoch is None else epoch
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums,
splits_squares, totals_indices, totals_sums,
totals_squares, input_leaves) = (
self.training_ops.count_extremely_random_stats(
input_data, sparse_indices, sparse_values, sparse_shape,
data_spec, input_labels, self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch, epoch,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data, sparse_indices, sparse_values, sparse_shape,
self.variables.node_to_accumulator_map,
input_leaves, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
finished, stale = self.training_ops.finished_nodes(
leaves, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch, epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples)
# Update leaf scores.
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates,
tree_threshold_updates, tree_depth_updates, new_eot) = (
self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.node_to_accumulator_map, finished, split_indices,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
depth_update_op = state_ops.scatter_update(
self.variables.tree_depths, tree_update_indices, tree_depth_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_depth_updates)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = (
self.training_ops.update_fertile_slots(
finished, non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
max_depth=self.params.max_depth,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = control_flow_ops.tuple([new_eot],
control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(state_ops.scatter_update(
self.variables.node_to_accumulator_map,
array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, -1]),
squeeze_dims=[0]),
array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, -1]),
squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(
0, [accumulators_cleared, accumulators_allocated])
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
# reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
# MASKED: inference_graph function (lines 777-801)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
|
def inference_graph(self, input_data, data_spec):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
return self.inference_ops.tree_predictions(
input_data, sparse_indices, sparse_values, sparse_shape, data_spec,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
| 777 | 801 |
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0,
max_depth=0, num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1, **kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.max_depth = max_depth
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
# classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Allow each tree to be unbalanced by up to a factor of 2.
self.max_depth = (self.max_depth or
int(2 * math.ceil(math.log(self.max_nodes, 2))))
# The Random Forest literature recommends sqrt(# features) for
# classification problems, and p/3 for regression problems.
# TODO(thomaswc): Consider capping this for large number of features.
self.num_splits_to_consider = (
self.num_splits_to_consider or
max(10, int(math.ceil(math.sqrt(self.num_features)))))
# max_fertile_nodes doesn't affect performance, only training speed.
# We therefore set it primarily based upon space considerations.
# Each fertile node takes up num_splits_to_consider times as much
# space as a non-fertile node. We want the fertile nodes in total to
# take up only as much space as the non-fertile nodes, so
num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
# But always use at least 1000 accumulator slots.
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
# But it also never needs to be larger than the number of leaves,
# which is max_nodes / 2.
self.max_fertile_nodes = min(self.max_fertile_nodes,
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
num_split_initializations_per_input = max(1, int(math.floor(
self.num_splits_to_consider / self.split_after_samples)))
self.split_initializations_per_input = getattr(
self, 'split_initializations_per_input',
num_split_initializations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.tree_depths = variable_scope.get_variable(
name=self.get_tree_name('tree_depths', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(1))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = tf_variables.Variable(
[0] * (params.max_nodes), name='start_epoch')
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with ops.device(device_assigner.get_device(i)):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = constant_op.constant(0)
self.cached = dummy.device
return self.cached
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None,
variables=None, tree_variables_class=TreeTrainingVariables,
tree_graphs=None, training=True,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(), i_ops.Load(), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(1, self.params.num_features, input_data)
return array_ops.concat(
1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
def training_graph(self, input_data, input_labels, data_spec=None,
epoch=None, **tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
# TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(
self.trees[i].training_graph(
tree_data, tree_labels, seed, data_spec=data_spec,
epoch=([0] if epoch is None else epoch),
**tree_kwargs))
return control_flow_ops.group(*tree_graphs)
def inference_graph(self, input_data, data_spec=None):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data,
data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return math_ops.reduce_sum(all_predict, 0) / self.params.num_trees
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes))
def training_loss(self):
return math_ops.neg(self.average_size())
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.neg(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [1, 1])), -2),
_init_tree, _nothing)
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
def training_graph(self, input_data, input_labels, random_seed,
data_spec, epoch=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
Returns:
The last op in the random tree training graph.
"""
epoch = [0] if epoch is None else epoch
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums,
splits_squares, totals_indices, totals_sums,
totals_squares, input_leaves) = (
self.training_ops.count_extremely_random_stats(
input_data, sparse_indices, sparse_values, sparse_shape,
data_spec, input_labels, self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch, epoch,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data, sparse_indices, sparse_values, sparse_shape,
self.variables.node_to_accumulator_map,
input_leaves, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
finished, stale = self.training_ops.finished_nodes(
leaves, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch, epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples)
# Update leaf scores.
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates,
tree_threshold_updates, tree_depth_updates, new_eot) = (
self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.node_to_accumulator_map, finished, split_indices,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
depth_update_op = state_ops.scatter_update(
self.variables.tree_depths, tree_update_indices, tree_depth_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_depth_updates)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = (
self.training_ops.update_fertile_slots(
finished, non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
max_depth=self.params.max_depth,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = control_flow_ops.tuple([new_eot],
control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(state_ops.scatter_update(
self.variables.node_to_accumulator_map,
array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, -1]),
squeeze_dims=[0]),
array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, -1]),
squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(
0, [accumulators_cleared, accumulators_allocated])
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
# reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
return self.inference_ops.tree_predictions(
input_data, sparse_indices, sparse_values, sparse_shape, data_spec,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
|
average_impurity
|
Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0,
max_depth=0, num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1, **kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.max_depth = max_depth
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Allow each tree to be unbalanced by up to a factor of 2.
self.max_depth = (self.max_depth or
int(2 * math.ceil(math.log(self.max_nodes, 2))))
# The Random Forest literature recommends sqrt(# features) for
# classification problems, and p/3 for regression problems.
# TODO(thomaswc): Consider capping this for large number of features.
self.num_splits_to_consider = (
self.num_splits_to_consider or
max(10, int(math.ceil(math.sqrt(self.num_features)))))
    # max_fertile_nodes doesn't affect performance, only training speed.
# We therefore set it primarily based upon space considerations.
    # Each fertile node takes up num_splits_to_consider times as much
    # space as a non-fertile node. We want the fertile nodes in total to
    # take up only as much space as the non-fertile nodes, so
num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
    # But always use at least 1000 accumulator slots.
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
# But it also never needs to be larger than the number of leaves,
# which is max_nodes / 2.
self.max_fertile_nodes = min(self.max_fertile_nodes,
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
num_split_initializiations_per_input = max(1, int(math.floor(
self.num_splits_to_consider / self.split_after_samples)))
self.split_initializations_per_input = getattr(
self, 'split_initializations_per_input',
num_split_initializiations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
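# Illustrative sketch (not part of the original library): a helper showing the
# defaults fill() derives for a small classification problem. The concrete
# numbers assume num_classes=2, num_features=40 and the constructor defaults
# above (max_nodes=10000, num_splits_to_consider=0, max_fertile_nodes=0).
def _example_filled_hparams():
  """Returns filled hyperparameters, for illustration only."""
  hparams = ForestHParams(num_classes=2, num_features=40).fill()
  # With the assumptions above, fill() produces:
  #   hparams.num_output_columns == 3      (num_classes + 1 count column)
  #   hparams.max_depth == 28              (2 * ceil(log2(10000)))
  #   hparams.num_splits_to_consider == 10 (max(10, ceil(sqrt(40))))
  #   hparams.max_fertile_nodes == 1000    (at least 1000 accumulator slots)
  return hparams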
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.tree_depths = variable_scope.get_variable(
name=self.get_tree_name('tree_depths', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(1))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = tf_variables.Variable(
[0] * (params.max_nodes), name='start_epoch')
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with ops.device(device_assigner.get_device(i)):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = constant_op.constant(0)
self.cached = dummy.device
return self.cached
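# Illustrative sketch (an assumption, not part of the original library): a
# subclass of the assigner above that round-robins whole trees over a fixed
# list of device strings, as the RandomForestDeviceAssigner docstring invites.
class _RoundRobinDeviceAssigner(RandomForestDeviceAssigner):
  """Assigns tree i to devices[i % len(devices)]."""
  def __init__(self, devices):
    super(_RoundRobinDeviceAssigner, self).__init__()
    self.devices = devices
  def get_device(self, tree_num):
    return self.devices[tree_num % len(self.devices)]
# e.g. RandomForestGraphs(hparams,
#          device_assigner=_RoundRobinDeviceAssigner(['/gpu:0', '/gpu:1']))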
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None,
variables=None, tree_variables_class=TreeTrainingVariables,
tree_graphs=None, training=True,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(), i_ops.Load(), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(1, self.params.num_features, input_data)
return array_ops.concat(
1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
def training_graph(self, input_data, input_labels, data_spec=None,
epoch=None, **tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
          # TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(
self.trees[i].training_graph(
tree_data, tree_labels, seed, data_spec=data_spec,
epoch=([0] if epoch is None else epoch),
**tree_kwargs))
return control_flow_ops.group(*tree_graphs)
def inference_graph(self, input_data, data_spec=None):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data,
data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return math_ops.reduce_sum(all_predict, 0) / self.params.num_trees
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes))
def training_loss(self):
return math_ops.neg(self.average_size())
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.neg(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [1, 1])), -2),
_init_tree, _nothing)
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
      A 1-D tensor of the weighted Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
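  # Worked example (illustrative): for a node_sums row [count, c1, c2] of
  # [10., 8., 2.], the smoothed per-class counts are [9., 3.], so
  #   _gini          -> 1 - (81 + 9) / 12**2 = 0.375
  #   _weighted_gini -> 12 - (81 + 9) / 12   = 4.5   (= 12 * 0.375)
  # The +1 smoothing is why the smoothed total (12) differs from the raw
  # count (10).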
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
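  # Worked example (illustrative): with sums = [[4., 8.]] and
  # squares = [[4., 20.]] (4 examples whose outputs sum to 8 and whose squared
  # outputs sum to 20), E[x] = 2 and E[x^2] = 5, so the variance is
  # 5 - 2**2 = 1. The count column (index 0) contributes 4/4 - (4/4)**2 = 0
  # to the reduce_sum.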
def training_graph(self, input_data, input_labels, random_seed,
data_spec, epoch=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
Returns:
The last op in the random tree training graph.
"""
epoch = [0] if epoch is None else epoch
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums,
splits_squares, totals_indices, totals_sums,
totals_squares, input_leaves) = (
self.training_ops.count_extremely_random_stats(
input_data, sparse_indices, sparse_values, sparse_shape,
data_spec, input_labels, self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch, epoch,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data, sparse_indices, sparse_values, sparse_shape,
self.variables.node_to_accumulator_map,
input_leaves, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
finished, stale = self.training_ops.finished_nodes(
leaves, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch, epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples)
# Update leaf scores.
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates,
tree_threshold_updates, tree_depth_updates, new_eot) = (
self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.node_to_accumulator_map, finished, split_indices,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
depth_update_op = state_ops.scatter_update(
self.variables.tree_depths, tree_update_indices, tree_depth_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_depth_updates)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = (
self.training_ops.update_fertile_slots(
finished, non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
max_depth=self.params.max_depth,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = control_flow_ops.tuple([new_eot],
control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(state_ops.scatter_update(
self.variables.node_to_accumulator_map,
array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, -1]),
squeeze_dims=[0]),
array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, -1]),
squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(
0, [accumulators_cleared, accumulators_allocated])
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
    # reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
return self.inference_ops.tree_predictions(
input_data, sparse_indices, sparse_values, sparse_shape, data_spec,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
# MASKED: average_impurity function (lines 803-827)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
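# Illustrative sketch (an assumption, not part of the original library): a
# subclass that uses finish_iteration() to reset a per-iteration statistic, as
# that method's docstring suggests. `iteration_stats` is a hypothetical extra
# variable such a subclass would add in its own TreeTrainingVariables subclass.
class _ResettingTreeGraphs(RandomTreeGraphs):
  """Zeroes a hypothetical per-iteration statistics variable each iteration."""
  def finish_iteration(self):
    return [state_ops.assign(
        self.variables.iteration_stats,
        array_ops.zeros_like(self.variables.iteration_stats))]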
|
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
| 803 | 827 |
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0,
max_depth=0, num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1, **kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.max_depth = max_depth
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Allow each tree to be unbalanced by up to a factor of 2.
self.max_depth = (self.max_depth or
int(2 * math.ceil(math.log(self.max_nodes, 2))))
# The Random Forest literature recommends sqrt(# features) for
# classification problems, and p/3 for regression problems.
# TODO(thomaswc): Consider capping this for large number of features.
self.num_splits_to_consider = (
self.num_splits_to_consider or
max(10, int(math.ceil(math.sqrt(self.num_features)))))
    # max_fertile_nodes doesn't affect performance, only training speed.
# We therefore set it primarily based upon space considerations.
    # Each fertile node takes up num_splits_to_consider times as much
    # space as a non-fertile node. We want the fertile nodes in total to
    # take up only as much space as the non-fertile nodes, so
num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
    # But always use at least 1000 accumulator slots.
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
# But it also never needs to be larger than the number of leaves,
# which is max_nodes / 2.
self.max_fertile_nodes = min(self.max_fertile_nodes,
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
num_split_initializiations_per_input = max(1, int(math.floor(
self.num_splits_to_consider / self.split_after_samples)))
self.split_initializations_per_input = getattr(
self, 'split_initializations_per_input',
num_split_initializiations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.tree_depths = variable_scope.get_variable(
name=self.get_tree_name('tree_depths', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(1))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = tf_variables.Variable(
[0] * (params.max_nodes), name='start_epoch')
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
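# Illustrative note: get_tree_name('node_sums', 3) returns 'node_sums-3', so
# each tree's variables receive distinct names under tf.get_variable, which is
# what allows the per-tree variables to be saved and restored as the class
# docstring above describes.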
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with ops.device(device_assigner.get_device(i)):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = constant_op.constant(0)
self.cached = dummy.device
return self.cached
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None,
variables=None, tree_variables_class=TreeTrainingVariables,
tree_graphs=None, training=True,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(), i_ops.Load(), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(1, self.params.num_features, input_data)
return array_ops.concat(
1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
def training_graph(self, input_data, input_labels, data_spec=None,
epoch=None, **tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
          # TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(
self.trees[i].training_graph(
tree_data, tree_labels, seed, data_spec=data_spec,
epoch=([0] if epoch is None else epoch),
**tree_kwargs))
return control_flow_ops.group(*tree_graphs)
def inference_graph(self, input_data, data_spec=None):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data,
data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return math_ops.reduce_sum(all_predict, 0) / self.params.num_trees
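  # Illustrative note: the forest prediction above is the elementwise mean of
  # the per-tree prediction tensors. For example, if three trees return
  # [0.9, 0.1], [0.6, 0.4] and [0.3, 0.7] for one row, the forest returns
  # [0.6, 0.4].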
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes))
def training_loss(self):
return math_ops.neg(self.average_size())
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.neg(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [1, 1])), -2),
_init_tree, _nothing)
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
      A 1-D tensor of the weighted Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
def training_graph(self, input_data, input_labels, random_seed,
data_spec, epoch=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
Returns:
The last op in the random tree training graph.
"""
epoch = [0] if epoch is None else epoch
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums,
splits_squares, totals_indices, totals_sums,
totals_squares, input_leaves) = (
self.training_ops.count_extremely_random_stats(
input_data, sparse_indices, sparse_values, sparse_shape,
data_spec, input_labels, self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch, epoch,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data, sparse_indices, sparse_values, sparse_shape,
self.variables.node_to_accumulator_map,
input_leaves, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
finished, stale = self.training_ops.finished_nodes(
leaves, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch, epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples)
# Update leaf scores.
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates,
tree_threshold_updates, tree_depth_updates, new_eot) = (
self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.node_to_accumulator_map, finished, split_indices,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
depth_update_op = state_ops.scatter_update(
self.variables.tree_depths, tree_update_indices, tree_depth_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_depth_updates)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = (
self.training_ops.update_fertile_slots(
finished, non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
max_depth=self.params.max_depth,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = control_flow_ops.tuple([new_eot],
control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(state_ops.scatter_update(
self.variables.node_to_accumulator_map,
array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, -1]),
squeeze_dims=[0]),
array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, -1]),
squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(
0, [accumulators_cleared, accumulators_allocated])
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
# reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
return self.inference_ops.tree_predictions(
input_data, sparse_indices, sparse_values, sparse_shape, data_spec,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
|
ncc_loss
|
Computes the normalized cross-correlation (NCC) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Normalized cross-correlation loss between the static and the
moving images, averaged over the batch. Range is [-1.0, 1.0].
The best value is -1 (perfect match) and the worst is 1.
References
----------
.. [1] `Wikipedia entry for the Cross-correlation
<https://en.wikipedia.org/wiki/Cross-correlation>`_
|
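In symbols, the loss described above can be written as follows (a sketch matching the implementation further down; here $\mu$ and $\sigma$ denote the per-image, per-channel mean and standard deviation over the spatial dimensions $H \times W$, and $\epsilon$ is a small constant added for numerical stability):

$$
\hat{s} = \frac{s - \mu_s}{\sigma_s + \epsilon}, \qquad
\hat{m} = \frac{m - \mu_m}{\sigma_m + \epsilon}, \qquad
\mathcal{L}_{\mathrm{NCC}} = -\frac{1}{N H W C} \sum_{n,h,w,c} \hat{s}_{nhwc}\,\hat{m}_{nhwc}
$$

Perfectly correlated images give $\mathcal{L}_{\mathrm{NCC}} = -1$ and perfectly anti-correlated images give $+1$, which is why the best value is $-1$.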
# -*- coding: utf-8 -*-
"""poc.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fTzz1aT2sb8oAXRO1-dr6O_IR6dof36e
A simple example for deep-learning-based non-rigid image registration
with the MNIST dataset.
**README:** If the below error occurs, run the whole notebook again (Ctrl+F9).
```
ValueError: tf.function-decorated function tried to create variables on non-first call.
```
"""
import tensorflow as tf
import tensorflow.keras.layers as layers
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
print(tf.keras.backend.image_data_format())
"""Loss functions"""
@tf.function
def mse_loss(static, moving):
"""Computes the mean squared error (MSE) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Mean squared error between the static and the moving images,
averaged over the batch.
"""
loss = tf.reduce_mean(tf.square(moving - static)) # shape ()
return loss
# MASKED: ncc_loss function (lines 53-94)
"""Define the model """
def simple_cnn(input_shape=(32, 32, 2)):
"""Creates a 2-D convolutional encoder-decoder network.
Parameters
----------
input_shape : sequence of ints, optional
Input data shape of the form (H, W, C). Default is (32, 32, 2).
Returns
-------
model
An instance of Keras' Model class.
Notes
-----
Given a concatenated pair of static and moving images as input, the
CNN computes a dense displacement field that is used to warp the
moving image to match with the static image.
The number of channels in the output (displacement field) is equal
to the dimensionality of the input data. For 3-D volumes, it is 3,
and for 2-D images, it is 2. The first channel comprises
displacement in the x-direction and the second comprises
displacement in the y-direction.
"""
out_channels = 2
inputs = layers.Input(shape=input_shape)
# encoder
x = layers.Conv2D(32, kernel_size=3, strides=2, padding='same',
activation='relu')(inputs) # 32 --> 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.MaxPool2D(pool_size=2)(x) # 16 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.MaxPool2D(pool_size=2)(x) # 8 --> 4
x = layers.Conv2D(128, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 4
x = layers.BatchNormalization()(x) # 4
# decoder
x = layers.Conv2DTranspose(64, kernel_size=2, strides=2,
padding='same')(x) # 4 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2DTranspose(32, kernel_size=2, strides=2,
padding='same')(x) # 8 --> 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2DTranspose(16, kernel_size=2, strides=2,
padding='same')(x) # 16 --> 32
x = layers.Conv2D(16, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 32
x = layers.BatchNormalization()(x) # 32
x = layers.Conv2D(out_channels, kernel_size=1, strides=1,
padding='same')(x) # 32
# Create the model.
model = tf.keras.Model(inputs, x, name='simple_cnn')
return model
"""
Differentiable image sampling
References:
1. https://github.com/tensorflow/models/blob/master/research/transformer/spatial_transformer.py
2. Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. https://arxiv.org/pdf/1506.02025.pdf
3. *Spatial* Transformer Networks by Kushagra Bhatnagar https://link.medium.com/0b2OrmqVO5
"""
@tf.function
def grid_sample(moving, grid):
"""Given a moving image and a sampling grid as input, computes the
transformed image by sampling the moving image at locations given by
the grid.
Currently, only 2-D images, i.e., 4-D inputs are supported.
Parameters
----------
moving : tf.Tensor, shape (N, H, W, C)
The moving image.
grid : tf.Tensor, shape (N, H, W, C)
A tensor of sampling points (x, y). The x and y values should be
normalized to [-1.0, 1.0] range.
Returns
-------
moved : tf.Tensor, shape (N, H, W, C)
The transformed image.
Notes
-----
Let M be the moving image of shape (H, W, C), T be the transformed
image of the same shape and G be the 2-D sampling grid of shape
(H, W, 2). The value of T at a location (x, y) is T[y, x, :] =
M[y', x', :] where [x', y'] = G[y, x, :].
Further, [x', y'] = [x + dx, y + dy] where [dx, dy] are the
displacements outputted by the CNN. When dx and dy are 0, the
sampling grid G is a regular grid and the transformed image is the
same as the moving image.
Since the sampling point (x + dx, y + dy) can be non-integral, the
value M[y', x'] is calculated using bi-linear interpolation.
References
----------
.. [1] `Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. <https://arxiv.org/abs/1506.02025>`_
.. [2] `TensorFlow implementation of spatial transformer networks.
<https://github.com/tensorflow/models/tree/master/research/transformer>`_
.. [3] `Spatial Transformer Networks by Kushagra Bhatnagar
<https://link.medium.com/0b2OrmqVO5>`_
"""
nb, nh, nw, nc = moving.shape
x = grid[..., 0] # shape (N, H, W)
y = grid[..., 1]
x = tf.cast(x, 'float32')
y = tf.cast(y, 'float32')
# Scale x and y from [-1.0, 1.0] to [0, W] and [0, H] respectively.
x = (x + 1.0) * 0.5 * tf.cast(nw, 'float32')
y = (y + 1.0) * 0.5 * tf.cast(nh, 'float32')
y_max = tf.cast(nh - 1, 'int32')
x_max = tf.cast(nw - 1, 'int32')
zero = tf.constant(0, 'int32')
# The value at (x, y) is a weighted average of the values at the
# four nearest integer locations: (x0, y0), (x1, y0), (x0, y1) and
# (x1, y1) where x0 = floor(x), x1 = ceil(x).
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
# Make sure indices are within the boundaries of the image.
x0 = tf.clip_by_value(x0, zero, x_max)
x1 = tf.clip_by_value(x1, zero, x_max)
y0 = tf.clip_by_value(y0, zero, y_max)
y1 = tf.clip_by_value(y1, zero, y_max)
# Collect indices of the four corners.
b = tf.ones_like(x0) * tf.reshape(tf.range(nb), [nb, 1, 1])
idx_a = tf.stack([b, y0, x0], axis=-1) # all top-left corners
idx_b = tf.stack([b, y1, x0], axis=-1) # all bottom-left corners
idx_c = tf.stack([b, y0, x1], axis=-1) # all top-right corners
idx_d = tf.stack([b, y1, x1], axis=-1) # all bottom-right corners
# shape (N, H, W, 3)
# Collect values at the corners.
moving_a = tf.gather_nd(moving, idx_a) # all top-left values
moving_b = tf.gather_nd(moving, idx_b) # all bottom-left values
moving_c = tf.gather_nd(moving, idx_c) # all top-right values
moving_d = tf.gather_nd(moving, idx_d) # all bottom-right values
# shape (N, H, W, C)
x0_f = tf.cast(x0, 'float32')
x1_f = tf.cast(x1, 'float32')
y0_f = tf.cast(y0, 'float32')
y1_f = tf.cast(y1, 'float32')
# Calculate the weights.
wa = tf.expand_dims((x1_f - x) * (y1_f - y), axis=-1)
wb = tf.expand_dims((x1_f - x) * (y - y0_f), axis=-1)
wc = tf.expand_dims((x - x0_f) * (y1_f - y), axis=-1)
wd = tf.expand_dims((x - x0_f) * (y - y0_f), axis=-1)
# Calculate the weighted sum.
moved = tf.add_n([wa * moving_a, wb * moving_b, wc * moving_c,
wd * moving_d])
return moved
@tf.function
def regular_grid(shape):
"""Returns a batch of 2-D regular grids.
Currently, only 2-D regular grids are supported.
Parameters
----------
shape : sequence of ints, shape (3, )
The desired regular grid shape of the form (N, H, W).
Returns
-------
grid : tf.Tensor, shape (N, H, W, 2)
A batch of 2-D regular grids, values normalized to [-1.0, 1.0]
range.
Notes
-----
Sampling using the regular grid is an identity transformation, i.e.,
it results in the same input and output images.
References
----------
.. [1] `NumPy, "numpy.meshgrid"
<https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html>`_
.. [2] `NumPy, "numpy.indices"
<https://numpy.org/doc/stable/reference/generated/numpy.indices.html>`_
"""
nb, nh, nw = shape
x = tf.linspace(-1.0, 1.0, nw) # shape (W, )
y = tf.linspace(-1.0, 1.0, nh) # shape (H, )
X, Y = tf.meshgrid(x, y) # shape (H, W), both X and Y
grid = tf.stack([X, Y], axis=-1)
grid = tf.expand_dims(grid, axis=0) # shape (1, H, W, 2)
# Repeat the grids along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
grid = tf.tile(grid, multiples)
return grid
"""Training and testing functions"""
@tf.function
def train_step(model, moving, static, criterion, optimizer):
"""A generic training procedure for one iteration.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
optimizer
An optimizer.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Define the GradientTape context for automatic differentiation.
with tf.GradientTape() as tape:
# Get the deformation field
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
# Compute gradients.
grads = tape.gradient(loss, model.trainable_variables)
# Update the trainable parameters.
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
@tf.function
def test_step(model, moving, static, criterion):
"""A generic testing procedure.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation field.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
return loss
"""Data loading"""
def load_data(label=2):
"""Loads the MNIST dataset and preprocesses it: scales to [0.0, 1.0]
range, resizes the images from (28, 28) to (32, 32) and filters the
dataset to keep images of just one class.
Parameters
----------
label : {2, 0, 1, 3, 4, 5, 6, 7, 8, 9}, default 2
The class of images to train and test on.
Returns
-------
(x_train, x_test) : tuple of ndarrays
NumPy arrays of training and testing images.
"""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Discard digits which are not equal to label.
ids_train = np.where(y_train == label)
ids_test = np.where(y_test == label)
x_train = x_train[ids_train]
x_test = x_test[ids_test]
# Scale the image to [0, 1] range.
x_train = x_train.astype(np.float32) / 255.0
x_test = x_test.astype(np.float32) / 255.0
# Add the channel dim at the end. (N, H, W) --> (N, H, W, 1)
x_train = x_train[..., None]
x_test = x_test[..., None]
# Resize images from (28, 28) to (32, 32).
x_train = tf.image.resize(x_train, (32, 32))
x_test = tf.image.resize(x_test, (32, 32))
return x_train, x_test
"""Sample results"""
def plot_images(model, moving, static):
"""Visualize some images after training.
Parameters
----------
model
The trained model.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation fields for the batch.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grids.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving images using the new sampling grids.
moved = grid_sample(moving, grid_new)
# Convert the tensors to 8-bit images.
moved = moved.numpy().squeeze(axis=-1) * 255.0
moved = moved.astype(np.uint8)
moving = moving.numpy().squeeze(axis=-1) * 255.0
moving = moving.astype(np.uint8)
static = static.numpy().squeeze(axis=-1) * 255.0
static = static.astype(np.uint8)
# Plot images.
fig = plt.figure(figsize=(3 * 1.7, nb * 1.7))
titles_list = ['Static', 'Moved', 'Moving']
images_list = [static, moved, moving]
for i in range(nb):
for j in range(3):
ax = fig.add_subplot(nb, 3, i * 3 + j + 1)
if i == 0:
ax.set_title(titles_list[j], fontsize=20)
ax.set_axis_off()
ax.imshow(images_list[j][i], cmap='gray')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
class Args():
batch_size = 8
epochs = 50
lr = 0.004
label = 7 # which digit images to train on?
num_samples = 5 # number of sample results to show
save_model = False
args = Args()
# Load preprocessed training and testing data.
x_train, x_test = load_data(label=args.label)
# Randomly select an image as the static image from the test set.
# idx = np.random.randint(x_test.shape[0])
# static = tf.expand_dims(x_test[idx], axis=0)
static = tf.expand_dims(x_test[0], axis=0)
# Select some images from the test set to show sample results.
# ids = tf.constant(np.random.choice(x_test.shape[0], replace=False,
# size=args.num_samples))
# x_sample = tf.gather(x_test, ids)
x_sample = x_test[:args.num_samples]
# Shuffle and batch the dataset.
from_tensor_slices = tf.data.Dataset.from_tensor_slices
# x_train = from_tensor_slices(x_train).shuffle(10000).batch(args.batch_size)
# x_test = from_tensor_slices(x_test).shuffle(10000).batch(args.batch_size)
x_train = from_tensor_slices(x_train).batch(args.batch_size)
x_test = from_tensor_slices(x_test).batch(args.batch_size)
# Create a model instance.
model = simple_cnn(input_shape=(32, 32, 2))
model.summary()
tf.keras.utils.plot_model(model, show_shapes=True, dpi=50)
# Select optimizer and loss function.
optimizer = tf.keras.optimizers.SGD(learning_rate=args.lr)
criterion = ncc_loss # normalized_cross_correlation_loss() # or mse_loss
# Define the metrics to track training and testing losses.
m_train = tf.keras.metrics.Mean(name='loss_train')
m_test = tf.keras.metrics.Mean(name='loss_test')
# Train and evaluate the model.
for epoch in range(args.epochs):
m_train.reset_states()
m_test.reset_states()
for i, moving in enumerate(x_train):
loss_train = train_step(model, moving, static, criterion,
optimizer)
m_train.update_state(loss_train)
for i, moving in enumerate(x_test):
loss_test = test_step(model, moving, static, criterion)
m_test.update_state(loss_test)
print('Epoch: %3d/%d\tTrain Loss: %.6f\tTest Loss: %.6f'
% (epoch + 1, args.epochs, m_train.result(), m_test.result()))
print('\n')
# Show sample results.
plot_images(model, x_sample, static)
# Save the trained model.
if args.save_model:
model.save('saved_models/simple_cnn')
|
@tf.function
def ncc_loss(static, moving):
"""Computes the normalized cross-correlation (NCC) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Normalized cross-correlation loss between the static and the
moving images, averaged over the batch. Range is [-1.0, 1.0].
The best value is -1 (perfect match) and the worst is 1.
References
----------
.. [1] `Wikipedia entry for the Cross-correlation
<https://en.wikipedia.org/wiki/Cross-correlation>`_
"""
eps = tf.constant(1e-9, 'float32')
static_mean = tf.reduce_mean(static, axis=[1, 2], keepdims=True)
moving_mean = tf.reduce_mean(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_std = tf.math.reduce_std(static, axis=[1, 2], keepdims=True)
moving_std = tf.math.reduce_std(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_hat = (static - static_mean)/(static_std + eps)
moving_hat = (moving - moving_mean)/(moving_std + eps)
# shape (N, H, W, C)
ncc = tf.reduce_mean(static_hat * moving_hat) # shape ()
loss = -ncc
return loss
| 53 | 94 |
# -*- coding: utf-8 -*-
"""poc.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fTzz1aT2sb8oAXRO1-dr6O_IR6dof36e
A simple example for deep-learning-based non-rigid image registration
with the MNIST dataset.
**README:** If the below error occurs, run the whole notebook again (Ctrl+F9).
```
ValueError: tf.function-decorated function tried to create variables on non-first call.
```
"""
import tensorflow as tf
import tensorflow.keras.layers as layers
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
print(tf.keras.backend.image_data_format())
"""Loss functions"""
@tf.function
def mse_loss(static, moving):
"""Computes the mean squared error (MSE) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Mean squared error between the static and the moving images,
averaged over the batch.
"""
loss = tf.reduce_mean(tf.square(moving - static)) # shape ()
return loss
@tf.function
def ncc_loss(static, moving):
"""Computes the normalized cross-correlation (NCC) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Normalized cross-correlation loss between the static and the
moving images, averaged over the batch. Range is [-1.0, 1.0].
The best value is -1 (perfect match) and the worst is 1.
References
----------
.. [1] `Wikipedia entry for the Cross-correlation
<https://en.wikipedia.org/wiki/Cross-correlation>`_
"""
eps = tf.constant(1e-9, 'float32')
static_mean = tf.reduce_mean(static, axis=[1, 2], keepdims=True)
moving_mean = tf.reduce_mean(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_std = tf.math.reduce_std(static, axis=[1, 2], keepdims=True)
moving_std = tf.math.reduce_std(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_hat = (static - static_mean)/(static_std + eps)
moving_hat = (moving - moving_mean)/(moving_std + eps)
# shape (N, H, W, C)
ncc = tf.reduce_mean(static_hat * moving_hat) # shape ()
loss = -ncc
return loss
"""Define the model """
def simple_cnn(input_shape=(32, 32, 2)):
"""Creates a 2-D convolutional encoder-decoder network.
Parameters
----------
input_shape : sequence of ints, optional
Input data shape of the form (H, W, C). Default is (32, 32, 2).
Returns
-------
model
An instance of Keras' Model class.
Notes
-----
Given a concatenated pair of static and moving images as input, the
CNN computes a dense displacement field that is used to warp the
moving image to match with the static image.
The number of channels in the output (displacement field) is equal
to the dimensionality of the input data. For 3-D volumes, it is 3,
and for 2-D images, it is 2. The first channel comprises
displacement in the x-direction and the second comprises
displacement in the y-direction.
"""
out_channels = 2
inputs = layers.Input(shape=input_shape)
# encoder
x = layers.Conv2D(32, kernel_size=3, strides=2, padding='same',
activation='relu')(inputs) # 32 --> 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.MaxPool2D(pool_size=2)(x) # 16 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.MaxPool2D(pool_size=2)(x) # 8 --> 4
x = layers.Conv2D(128, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 4
x = layers.BatchNormalization()(x) # 4
# decoder
x = layers.Conv2DTranspose(64, kernel_size=2, strides=2,
padding='same')(x) # 4 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2DTranspose(32, kernel_size=2, strides=2,
padding='same')(x) # 8 --> 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2DTranspose(16, kernel_size=2, strides=2,
padding='same')(x) # 16 --> 32
x = layers.Conv2D(16, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 32
x = layers.BatchNormalization()(x) # 32
x = layers.Conv2D(out_channels, kernel_size=1, strides=1,
padding='same')(x) # 32
# Create the model.
model = tf.keras.Model(inputs, x, name='simple_cnn')
return model
"""
Differentiable image sampling
References:
1. https://github.com/tensorflow/models/blob/master/research/transformer/spatial_transformer.py
2. Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. https://arxiv.org/pdf/1506.02025.pdf
3. *Spatial* Transformer Networks by Kushagra Bhatnagar https://link.medium.com/0b2OrmqVO5
"""
@tf.function
def grid_sample(moving, grid):
"""Given a moving image and a sampling grid as input, computes the
transformed image by sampling the moving image at locations given by
the grid.
Currently, only 2-D images, i.e., 4-D inputs are supported.
Parameters
----------
moving : tf.Tensor, shape (N, H, W, C)
The moving image.
grid : tf.Tensor, shape (N, H, W, C)
A tensor of sampling points (x, y). The x and y values should be
normalized to [-1.0, 1.0] range.
Returns
-------
moved : tf.Tensor, shape (N, H, W, C)
The transformed image.
Notes
-----
Let M be the moving image of shape (H, W, C), T be the transformed
image of the same shape and G be the 2-D sampling grid of shape
(H, W, 2). The value of T at a location (x, y) is T[y, x, :] =
M[y', x', :] where [x', y'] = G[y, x, :].
Further, [x', y'] = [x + dx, y + dy] where [dx, dy] are the
displacements outputted by the CNN. When dx and dy are 0, the
sampling grid G is a regular grid and the transformed image is the
same as the moving image.
Since the sampling point (x + dx, y + dy) can be non-integral, the
value M[y', x'] is calculated using bi-linear interpolation.
References
----------
.. [1] `Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. <https://arxiv.org/abs/1506.02025>`_
.. [2] `TensorFlow implementation of spatial transformer networks.
<https://github.com/tensorflow/models/tree/master/research/transformer>`_
.. [3] `Spatial Transformer Networks by Kushagra Bhatnagar
<https://link.medium.com/0b2OrmqVO5>`_
"""
nb, nh, nw, nc = moving.shape
x = grid[..., 0] # shape (N, H, W)
y = grid[..., 1]
x = tf.cast(x, 'float32')
y = tf.cast(y, 'float32')
# Scale x and y from [-1.0, 1.0] to [0, W] and [0, H] respectively.
x = (x + 1.0) * 0.5 * tf.cast(nw, 'float32')
y = (y + 1.0) * 0.5 * tf.cast(nh, 'float32')
y_max = tf.cast(nh - 1, 'int32')
x_max = tf.cast(nw - 1, 'int32')
zero = tf.constant(0, 'int32')
# The value at (x, y) is a weighted average of the values at the
# four nearest integer locations: (x0, y0), (x1, y0), (x0, y1) and
# (x1, y1) where x0 = floor(x), x1 = ceil(x).
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
# Make sure indices are within the boundaries of the image.
x0 = tf.clip_by_value(x0, zero, x_max)
x1 = tf.clip_by_value(x1, zero, x_max)
y0 = tf.clip_by_value(y0, zero, y_max)
y1 = tf.clip_by_value(y1, zero, y_max)
# Collect indices of the four corners.
b = tf.ones_like(x0) * tf.reshape(tf.range(nb), [nb, 1, 1])
idx_a = tf.stack([b, y0, x0], axis=-1) # all top-left corners
idx_b = tf.stack([b, y1, x0], axis=-1) # all bottom-left corners
idx_c = tf.stack([b, y0, x1], axis=-1) # all top-right corners
idx_d = tf.stack([b, y1, x1], axis=-1) # all bottom-right corners
# shape (N, H, W, 3)
# Collect values at the corners.
moving_a = tf.gather_nd(moving, idx_a) # all top-left values
moving_b = tf.gather_nd(moving, idx_b) # all bottom-left values
moving_c = tf.gather_nd(moving, idx_c) # all top-right values
moving_d = tf.gather_nd(moving, idx_d) # all bottom-right values
# shape (N, H, W, C)
x0_f = tf.cast(x0, 'float32')
x1_f = tf.cast(x1, 'float32')
y0_f = tf.cast(y0, 'float32')
y1_f = tf.cast(y1, 'float32')
# Calculate the weights.
wa = tf.expand_dims((x1_f - x) * (y1_f - y), axis=-1)
wb = tf.expand_dims((x1_f - x) * (y - y0_f), axis=-1)
wc = tf.expand_dims((x - x0_f) * (y1_f - y), axis=-1)
wd = tf.expand_dims((x - x0_f) * (y - y0_f), axis=-1)
# Calculate the weighted sum.
moved = tf.add_n([wa * moving_a, wb * moving_b, wc * moving_c,
wd * moving_d])
return moved
@tf.function
def regular_grid(shape):
"""Returns a batch of 2-D regular grids.
Currently, only 2-D regular grids are supported.
Parameters
----------
shape : sequence of ints, shape (3, )
The desired regular grid shape of the form (N, H, W).
Returns
-------
grid : tf.Tensor, shape (N, H, W, 2)
A batch of 2-D regular grids, values normalized to [-1.0, 1.0]
range.
Notes
-----
Sampling using the regular grid is an identity transformation, i.e.,
it results in the same input and output images.
References
----------
.. [1] `NumPy, "numpy.meshgrid"
<https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html>`_
.. [2] `NumPy, "numpy.indices"
<https://numpy.org/doc/stable/reference/generated/numpy.indices.html>`_
"""
nb, nh, nw = shape
x = tf.linspace(-1.0, 1.0, nw) # shape (W, )
y = tf.linspace(-1.0, 1.0, nh) # shape (H, )
X, Y = tf.meshgrid(x, y) # shape (H, W), both X and Y
grid = tf.stack([X, Y], axis=-1)
grid = tf.expand_dims(grid, axis=0) # shape (1, H, W, 2)
# Repeat the grids along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
grid = tf.tile(grid, multiples)
return grid
"""Training and testing functions"""
@tf.function
def train_step(model, moving, static, criterion, optimizer):
"""A generic training procedure for one iteration.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
optimizer
An optimizer.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Define the GradientTape context for automatic differentiation.
with tf.GradientTape() as tape:
# Get the deformation field
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
# Compute gradients.
grads = tape.gradient(loss, model.trainable_variables)
# Update the trainable parameters.
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
@tf.function
def test_step(model, moving, static, criterion):
"""A generic testing procedure.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation field.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
return loss
"""Data loading"""
def load_data(label=2):
"""Loads the MNIST dataset and preprocesses it: scales to [0.0, 1.0]
range, resizes the images from (28, 28) to (32, 32) and filters the
dataset to keep images of just one class.
Parameters
----------
label : {2, 0, 1, 3, 4, 5, 6, 7, 8, 9}, default 2
The class of images to train and test on.
Returns
-------
(x_train, x_test) : tuple of ndarrays
NumPy arrays of training and testing images.
"""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Discard digits which are not equal to label.
ids_train = np.where(y_train == label)
ids_test = np.where(y_test == label)
x_train = x_train[ids_train]
x_test = x_test[ids_test]
# Scale the image to [0, 1] range.
x_train = x_train.astype(np.float32) / 255.0
x_test = x_test.astype(np.float32) / 255.0
# Add the channel dim at the end. (N, H, W) --> (N, H, W, 1)
x_train = x_train[..., None]
x_test = x_test[..., None]
# Resize images from (28, 28) to (32, 32).
x_train = tf.image.resize(x_train, (32, 32))
x_test = tf.image.resize(x_test, (32, 32))
return x_train, x_test
"""Sample results"""
def plot_images(model, moving, static):
"""Visualize some images after training.
Parameters
----------
model
The trained model.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation fields for the batch.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grids.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving images using the new sampling grids.
moved = grid_sample(moving, grid_new)
# Convert the tensors to 8-bit images.
moved = moved.numpy().squeeze(axis=-1) * 255.0
moved = moved.astype(np.uint8)
moving = moving.numpy().squeeze(axis=-1) * 255.0
moving = moving.astype(np.uint8)
static = static.numpy().squeeze(axis=-1) * 255.0
static = static.astype(np.uint8)
# Plot images.
fig = plt.figure(figsize=(3 * 1.7, nb * 1.7))
titles_list = ['Static', 'Moved', 'Moving']
images_list = [static, moved, moving]
for i in range(nb):
for j in range(3):
ax = fig.add_subplot(nb, 3, i * 3 + j + 1)
if i == 0:
ax.set_title(titles_list[j], fontsize=20)
ax.set_axis_off()
ax.imshow(images_list[j][i], cmap='gray')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
class Args():
batch_size = 8
epochs = 50
lr = 0.004
label = 7 # which digit images to train on?
num_samples = 5 # number of sample results to show
save_model = False
args = Args()
# Load preprocessed training and testing data.
x_train, x_test = load_data(label=args.label)
# Randomly select an image as the static image from the test set.
# idx = np.random.randint(x_test.shape[0])
# static = tf.expand_dims(x_test[idx], axis=0)
static = tf.expand_dims(x_test[0], axis=0)
# Select some images from the test set to show sample results.
# ids = tf.constant(np.random.choice(x_test.shape[0], replace=False,
# size=args.num_samples))
# x_sample = tf.gather(x_test, ids)
x_sample = x_test[:args.num_samples]
# Shuffle and batch the dataset.
from_tensor_slices = tf.data.Dataset.from_tensor_slices
# x_train = from_tensor_slices(x_train).shuffle(10000).batch(args.batch_size)
# x_test = from_tensor_slices(x_test).shuffle(10000).batch(args.batch_size)
x_train = from_tensor_slices(x_train).batch(args.batch_size)
x_test = from_tensor_slices(x_test).batch(args.batch_size)
# Create a model instance.
model = simple_cnn(input_shape=(32, 32, 2))
model.summary()
tf.keras.utils.plot_model(model, show_shapes=True, dpi=50)
# Select optimizer and loss function.
optimizer = tf.keras.optimizers.SGD(learning_rate=args.lr)
criterion = ncc_loss # normalized_cross_correlation_loss() # or mse_loss
# Define the metrics to track training and testing losses.
m_train = tf.keras.metrics.Mean(name='loss_train')
m_test = tf.keras.metrics.Mean(name='loss_test')
# Train and evaluate the model.
for epoch in range(args.epochs):
m_train.reset_states()
m_test.reset_states()
for i, moving in enumerate(x_train):
loss_train = train_step(model, moving, static, criterion,
optimizer)
m_train.update_state(loss_train)
for i, moving in enumerate(x_test):
loss_test = test_step(model, moving, static, criterion)
m_test.update_state(loss_test)
print('Epoch: %3d/%d\tTrain Loss: %.6f\tTest Loss: %.6f'
% (epoch + 1, args.epochs, m_train.result(), m_test.result()))
print('\n')
# Show sample results.
plot_images(model, x_sample, static)
# Save the trained model.
if args.save_model:
model.save('saved_models/simple_cnn')
|
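A minimal smoke-test sketch for the sampling utilities defined in the notebook above (assuming the regular_grid and grid_sample functions as written; per the regular_grid docstring, sampling with an undeformed regular grid is an identity-like transformation, so this only checks the shapes involved):

import tensorflow as tf

# Hypothetical check, not part of the original notebook.
moving = tf.random.uniform([2, 32, 32, 1])   # (N, H, W, C)
grid = regular_grid([2, 32, 32])             # (N, H, W, 2), values in [-1, 1]
moved = grid_sample(moving, grid)            # zero deformation -> regular-grid sampling
print(moved.shape)                           # (2, 32, 32, 1)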
simple_cnn
|
Creates a 2-D convolutional encoder-decoder network.
Parameters
----------
input_shape : sequence of ints, optional
Input data shape of the form (H, W, C). Default is (32, 32, 2).
Returns
-------
model
An instance of Keras' Model class.
Notes
-----
Given a concatenated pair of static and moving images as input, the
CNN computes a dense displacement field that is used to warp the
moving image to match with the static image.
The number of channels in the output (displacement field) is equal
to the dimensionality of the input data. For 3-D volumes, it is 3,
and for 2-D images, it is 2. The first channel comprises
displacement in the x-direction and the second comprises
displacement in the y-direction.
|
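A minimal usage sketch for the network described above (assuming the simple_cnn function defined below): the model maps a batch of concatenated (moving, static) pairs to a dense 2-channel displacement field of the same spatial size.

import tensorflow as tf

# Hypothetical shape check, not part of the original notebook.
model = simple_cnn(input_shape=(32, 32, 2))
pair = tf.zeros([4, 32, 32, 2])      # concatenated moving/static images, (N, H, W, 2)
displacement = model(pair)           # (dx, dy) displacement field
print(displacement.shape)            # (4, 32, 32, 2)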
# -*- coding: utf-8 -*-
"""poc.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fTzz1aT2sb8oAXRO1-dr6O_IR6dof36e
A simple example for deep-learning-based non-rigid image registration
with the MNIST dataset.
**README:** If the below error occurs, run the whole notebook again (Ctrl+F9).
```
ValueError: tf.function-decorated function tried to create variables on non-first call.
```
"""
import tensorflow as tf
import tensorflow.keras.layers as layers
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
print(tf.keras.backend.image_data_format())
"""Loss functions"""
@tf.function
def mse_loss(static, moving):
"""Computes the mean squared error (MSE) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Mean squared error between the static and the moving images,
averaged over the batch.
"""
loss = tf.reduce_mean(tf.square(moving - static)) # shape ()
return loss
@tf.function
def ncc_loss(static, moving):
"""Computes the normalized cross-correlation (NCC) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Normalized cross-correlation loss between the static and the
moving images, averaged over the batch. Range is [-1.0, 1.0].
The best value is -1 (perfect match) and the worst is 1.
References
----------
.. [1] `Wikipedia entry for the Cross-correlation
<https://en.wikipedia.org/wiki/Cross-correlation>`_
"""
eps = tf.constant(1e-9, 'float32')
static_mean = tf.reduce_mean(static, axis=[1, 2], keepdims=True)
moving_mean = tf.reduce_mean(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_std = tf.math.reduce_std(static, axis=[1, 2], keepdims=True)
moving_std = tf.math.reduce_std(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_hat = (static - static_mean)/(static_std + eps)
moving_hat = (moving - moving_mean)/(moving_std + eps)
# shape (N, H, W, C)
ncc = tf.reduce_mean(static_hat * moving_hat) # shape ()
loss = -ncc
return loss
"""Define the model """
# MASKED: simple_cnn function (lines 98-178)
@tf.function
def grid_sample(moving, grid):
"""Given a moving image and a sampling grid as input, computes the
transformed image by sampling the moving image at locations given by
the grid.
Currently, only 2-D images, i.e., 4-D inputs are supported.
Parameters
----------
moving : tf.Tensor, shape (N, H, W, C)
The moving image.
grid : tf.Tensor, shape (N, H, W, C)
A tensor of sampling points (x, y). The x and y values should be
normalized to [-1.0, 1.0] range.
Returns
-------
moved : tf.Tensor, shape (N, H, W, C)
The transformed image.
Notes
-----
Let M be the moving image of shape (H, W, C), T be the transformed
image of the same shape and G be the 2-D sampling grid of shape
(H, W, 2). The value of T at a location (x, y) is T[y, x, :] =
M[y', x', :] where [x', y'] = G[y, x, :].
Further, [x', y'] = [x + dx, y + dy] where [dx, dy] are the
displacements outputted by the CNN. When dx and dy are 0, the
sampling grid G is a regular grid and the transformed image is the
same as the moving image.
Since the sampling point (x + dx, y + dy) can be non-integral, the
value M[y', x'] is calculated using bi-linear interpolation.
References
----------
.. [1] `Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. <https://arxiv.org/abs/1506.02025>`_
.. [2] `TensorFlow implementation of spatial transformer networks.
<https://github.com/tensorflow/models/tree/master/research/transformer>`_
.. [3] `Spatial Transformer Networks by Kushagra Bhatnagar
<https://link.medium.com/0b2OrmqVO5>`_
"""
nb, nh, nw, nc = moving.shape
x = grid[..., 0] # shape (N, H, W)
y = grid[..., 1]
x = tf.cast(x, 'float32')
y = tf.cast(y, 'float32')
# Scale x and y from [-1.0, 1.0] to [0, W] and [0, H] respectively.
x = (x + 1.0) * 0.5 * tf.cast(nw, 'float32')
y = (y + 1.0) * 0.5 * tf.cast(nh, 'float32')
y_max = tf.cast(nh - 1, 'int32')
x_max = tf.cast(nw - 1, 'int32')
zero = tf.constant(0, 'int32')
# The value at (x, y) is a weighted average of the values at the
# four nearest integer locations: (x0, y0), (x1, y0), (x0, y1) and
# (x1, y1) where x0 = floor(x), x1 = ceil(x).
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
# Make sure indices are within the boundaries of the image.
x0 = tf.clip_by_value(x0, zero, x_max)
x1 = tf.clip_by_value(x1, zero, x_max)
y0 = tf.clip_by_value(y0, zero, y_max)
y1 = tf.clip_by_value(y1, zero, y_max)
# Collect indices of the four corners.
b = tf.ones_like(x0) * tf.reshape(tf.range(nb), [nb, 1, 1])
idx_a = tf.stack([b, y0, x0], axis=-1) # all top-left corners
idx_b = tf.stack([b, y1, x0], axis=-1) # all bottom-left corners
idx_c = tf.stack([b, y0, x1], axis=-1) # all top-right corners
idx_d = tf.stack([b, y1, x1], axis=-1) # all bottom-right corners
# shape (N, H, W, 3)
# Collect values at the corners.
moving_a = tf.gather_nd(moving, idx_a) # all top-left values
moving_b = tf.gather_nd(moving, idx_b) # all bottom-left values
moving_c = tf.gather_nd(moving, idx_c) # all top-right values
moving_d = tf.gather_nd(moving, idx_d) # all bottom-right values
# shape (N, H, W, C)
x0_f = tf.cast(x0, 'float32')
x1_f = tf.cast(x1, 'float32')
y0_f = tf.cast(y0, 'float32')
y1_f = tf.cast(y1, 'float32')
# Calculate the weights.
wa = tf.expand_dims((x1_f - x) * (y1_f - y), axis=-1)
wb = tf.expand_dims((x1_f - x) * (y - y0_f), axis=-1)
wc = tf.expand_dims((x - x0_f) * (y1_f - y), axis=-1)
wd = tf.expand_dims((x - x0_f) * (y - y0_f), axis=-1)
# Calculate the weighted sum.
moved = tf.add_n([wa * moving_a, wb * moving_b, wc * moving_c,
wd * moving_d])
return moved
@tf.function
def regular_grid(shape):
"""Returns a batch of 2-D regular grids.
Currently, only 2-D regular grids are supported.
Parameters
----------
shape : sequence of ints, shape (3, )
The desired regular grid shape of the form (N, H, W).
Returns
-------
grid : tf.Tensor, shape (N, H, W, 2)
A batch of 2-D regular grids, values normalized to [-1.0, 1.0]
range.
Notes
-----
Sampling using the regular grid is an identity transformation, i.e.,
it results in the same input and output images.
References
----------
.. [1] `NumPy, "numpy.meshgrid"
<https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html>`_
.. [2] `NumPy, "numpy.indices"
<https://numpy.org/doc/stable/reference/generated/numpy.indices.html>`_
"""
nb, nh, nw = shape
x = tf.linspace(-1.0, 1.0, nw) # shape (W, )
y = tf.linspace(-1.0, 1.0, nh) # shape (H, )
X, Y = tf.meshgrid(x, y) # shape (H, W), both X and Y
grid = tf.stack([X, Y], axis=-1)
grid = tf.expand_dims(grid, axis=0) # shape (1, H, W, 2)
# Repeat the grids along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
grid = tf.tile(grid, multiples)
return grid
"""Training and testing functions"""
@tf.function
def train_step(model, moving, static, criterion, optimizer):
"""A generic training procedure for one iteration.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
optimizer
An optimizer.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Define the GradientTape context for automatic differentiation.
with tf.GradientTape() as tape:
# Get the deformation field
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
# Compute gradients.
grads = tape.gradient(loss, model.trainable_variables)
# Update the trainable parameters.
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
@tf.function
def test_step(model, moving, static, criterion):
"""A generic testing procedure.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation field.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
return loss
"""Data loading"""
def load_data(label=2):
"""Loads the MNIST dataset and preprocesses it: scales to [0.0, 1.0]
range, resizes the images from (28, 28) to (32, 32) and filters the
dataset to keep images of just one class.
Parameters
----------
label : {2, 0, 1, 3, 4, 5, 6, 7, 8, 9}, default 2
The class of images to train and test on.
Returns
-------
(x_train, x_test) : tuple of ndarrays
NumPy arrays of training and testing images.
"""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Discard digits which are not equal to label.
ids_train = np.where(y_train == label)
ids_test = np.where(y_test == label)
x_train = x_train[ids_train]
x_test = x_test[ids_test]
# Scale the image to [0, 1] range.
x_train = x_train.astype(np.float32) / 255.0
x_test = x_test.astype(np.float32) / 255.0
# Add the channel dim at the end. (N, H, W) --> (N, H, W, 1)
x_train = x_train[..., None]
x_test = x_test[..., None]
# Resize images from (28, 28) to (32, 32).
x_train = tf.image.resize(x_train, (32, 32))
x_test = tf.image.resize(x_test, (32, 32))
return x_train, x_test
"""Sample results"""
def plot_images(model, moving, static):
"""Visualize some images after training.
Parameters
----------
model
The trained model.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation fields for the batch.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grids.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving images using the new sampling grids.
moved = grid_sample(moving, grid_new)
# Convert the tensors to 8-bit images.
moved = moved.numpy().squeeze(axis=-1) * 255.0
moved = moved.astype(np.uint8)
moving = moving.numpy().squeeze(axis=-1) * 255.0
moving = moving.astype(np.uint8)
static = static.numpy().squeeze(axis=-1) * 255.0
static = static.astype(np.uint8)
# Plot images.
fig = plt.figure(figsize=(3 * 1.7, nb * 1.7))
titles_list = ['Static', 'Moved', 'Moving']
images_list = [static, moved, moving]
for i in range(nb):
for j in range(3):
ax = fig.add_subplot(nb, 3, i * 3 + j + 1)
if i == 0:
ax.set_title(titles_list[j], fontsize=20)
ax.set_axis_off()
ax.imshow(images_list[j][i], cmap='gray')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
class Args():
batch_size = 8
epochs = 50
lr = 0.004
label = 7 # which digit images to train on?
num_samples = 5 # number of sample results to show
save_model = False
args = Args()
# Load preprocessed training and testing data.
x_train, x_test = load_data(label=args.label)
# Randomly select an image as the static image from the test set.
# idx = np.random.randint(x_test.shape[0])
# static = tf.expand_dims(x_test[idx], axis=0)
static = tf.expand_dims(x_test[0], axis=0)
# Select some images from the test set to show sample results.
# ids = tf.constant(np.random.choice(x_test.shape[0], replace=False,
# size=args.num_samples))
# x_sample = tf.gather(x_test, ids)
x_sample = x_test[:args.num_samples]
# Shuffle and batch the dataset.
from_tensor_slices = tf.data.Dataset.from_tensor_slices
# x_train = from_tensor_slices(x_train).shuffle(10000).batch(args.batch_size)
# x_test = from_tensor_slices(x_test).shuffle(10000).batch(args.batch_size)
x_train = from_tensor_slices(x_train).batch(args.batch_size)
x_test = from_tensor_slices(x_test).batch(args.batch_size)
# Create a model instance.
model = simple_cnn(input_shape=(32, 32, 2))
model.summary()
tf.keras.utils.plot_model(model, show_shapes=True, dpi=50)
# Select optimizer and loss function.
optimizer = tf.keras.optimizers.SGD(learning_rate=args.lr)
    criterion = ncc_loss  # or mse_loss
# Define the metrics to track training and testing losses.
m_train = tf.keras.metrics.Mean(name='loss_train')
m_test = tf.keras.metrics.Mean(name='loss_test')
# Train and evaluate the model.
for epoch in range(args.epochs):
m_train.reset_states()
m_test.reset_states()
for i, moving in enumerate(x_train):
loss_train = train_step(model, moving, static, criterion,
optimizer)
m_train.update_state(loss_train)
for i, moving in enumerate(x_test):
loss_test = test_step(model, moving, static, criterion)
m_test.update_state(loss_test)
print('Epoch: %3d/%d\tTrain Loss: %.6f\tTest Loss: %.6f'
% (epoch + 1, args.epochs, m_train.result(), m_test.result()))
print('\n')
# Show sample results.
plot_images(model, x_sample, static)
# Save the trained model.
if args.save_model:
model.save('saved_models/simple_cnn')
|
def simple_cnn(input_shape=(32, 32, 2)):
"""Creates a 2-D convolutional encoder-decoder network.
Parameters
----------
input_shape : sequence of ints, optional
Input data shape of the form (H, W, C). Default is (32, 32, 2).
Returns
-------
model
An instance of Keras' Model class.
Notes
-----
Given a concatenated pair of static and moving images as input, the
CNN computes a dense displacement field that is used to warp the
moving image to match with the static image.
The number of channels in the output (displacement field) is equal
to the dimensionality of the input data. For 3-D volumes, it is 3,
and for 2-D images, it is 2. The first channel comprises
displacement in the x-direction and the second comprises
displacement in the y-direction.
"""
out_channels = 2
inputs = layers.Input(shape=input_shape)
# encoder
x = layers.Conv2D(32, kernel_size=3, strides=2, padding='same',
activation='relu')(inputs) # 32 --> 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.MaxPool2D(pool_size=2)(x) # 16 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.MaxPool2D(pool_size=2)(x) # 8 --> 4
x = layers.Conv2D(128, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 4
x = layers.BatchNormalization()(x) # 4
# decoder
x = layers.Conv2DTranspose(64, kernel_size=2, strides=2,
padding='same')(x) # 4 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2DTranspose(32, kernel_size=2, strides=2,
padding='same')(x) # 8 --> 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2DTranspose(16, kernel_size=2, strides=2,
padding='same')(x) # 16 --> 32
x = layers.Conv2D(16, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 32
x = layers.BatchNormalization()(x) # 32
x = layers.Conv2D(out_channels, kernel_size=1, strides=1,
padding='same')(x) # 32
# Create the model.
model = tf.keras.Model(inputs, x, name='simple_cnn')
return model
"""
Differentiable image sampling
References:
1. https://github.com/tensorflow/models/blob/master/research/transformer/spatial_transformer.py
2. Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. https://arxiv.org/pdf/1506.02025.pdf
3. *Spatial* Transformer Networks by Kushagra Bhatnagar https://link.medium.com/0b2OrmqVO5
"""
| 98 | 178 |
# -*- coding: utf-8 -*-
"""poc.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fTzz1aT2sb8oAXRO1-dr6O_IR6dof36e
A simple example for deep-learning-based non-rigid image registration
with the MNIST dataset.
**README:** If the below error occurs, run the whole notebook again (Ctrl+F9).
```
ValueError: tf.function-decorated function tried to create variables on non-first call.
```
"""
import tensorflow as tf
import tensorflow.keras.layers as layers
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
print(tf.keras.backend.image_data_format())
"""Loss functions"""
@tf.function
def mse_loss(static, moving):
"""Computes the mean squared error (MSE) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Mean squared error between the static and the moving images,
averaged over the batch.
"""
loss = tf.reduce_mean(tf.square(moving - static)) # shape ()
return loss
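# Tiny illustrative check (added here; not in the original notebook): the MSE
# between an image and itself is exactly zero.
_same = tf.random.uniform((2, 32, 32, 1))
print(float(mse_loss(_same, _same)))  # expected: 0.0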
@tf.function
def ncc_loss(static, moving):
"""Computes the normalized cross-correlation (NCC) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Normalized cross-correlation loss between the static and the
moving images, averaged over the batch. Range is [-1.0, 1.0].
The best value is -1 (perfect match) and the worst is 1.
References
----------
.. [1] `Wikipedia entry for the Cross-correlation
<https://en.wikipedia.org/wiki/Cross-correlation>`_
"""
eps = tf.constant(1e-9, 'float32')
static_mean = tf.reduce_mean(static, axis=[1, 2], keepdims=True)
moving_mean = tf.reduce_mean(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_std = tf.math.reduce_std(static, axis=[1, 2], keepdims=True)
moving_std = tf.math.reduce_std(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_hat = (static - static_mean)/(static_std + eps)
moving_hat = (moving - moving_mean)/(moving_std + eps)
# shape (N, H, W, C)
ncc = tf.reduce_mean(static_hat * moving_hat) # shape ()
loss = -ncc
return loss
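# Quick sanity check (illustrative addition, not in the original notebook):
# the NCC loss of an image with itself should be close to -1 (a perfect match).
_img = tf.random.uniform((2, 32, 32, 1))
print(float(ncc_loss(_img, _img)))  # expected to be close to -1.0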
"""Define the model """
def simple_cnn(input_shape=(32, 32, 2)):
"""Creates a 2-D convolutional encoder-decoder network.
Parameters
----------
input_shape : sequence of ints, optional
Input data shape of the form (H, W, C). Default is (32, 32, 2).
Returns
-------
model
An instance of Keras' Model class.
Notes
-----
Given a concatenated pair of static and moving images as input, the
CNN computes a dense displacement field that is used to warp the
moving image to match with the static image.
The number of channels in the output (displacement field) is equal
to the dimensionality of the input data. For 3-D volumes, it is 3,
and for 2-D images, it is 2. The first channel comprises
displacement in the x-direction and the second comprises
displacement in the y-direction.
"""
out_channels = 2
inputs = layers.Input(shape=input_shape)
# encoder
x = layers.Conv2D(32, kernel_size=3, strides=2, padding='same',
activation='relu')(inputs) # 32 --> 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.MaxPool2D(pool_size=2)(x) # 16 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.MaxPool2D(pool_size=2)(x) # 8 --> 4
x = layers.Conv2D(128, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 4
x = layers.BatchNormalization()(x) # 4
# decoder
x = layers.Conv2DTranspose(64, kernel_size=2, strides=2,
padding='same')(x) # 4 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2DTranspose(32, kernel_size=2, strides=2,
padding='same')(x) # 8 --> 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2DTranspose(16, kernel_size=2, strides=2,
padding='same')(x) # 16 --> 32
x = layers.Conv2D(16, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 32
x = layers.BatchNormalization()(x) # 32
x = layers.Conv2D(out_channels, kernel_size=1, strides=1,
padding='same')(x) # 32
# Create the model.
model = tf.keras.Model(inputs, x, name='simple_cnn')
return model
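# Quick shape check (illustrative addition, not in the original notebook): for
# a batch of concatenated (32, 32, 2) image pairs, the network outputs a
# two-channel displacement field with the same spatial size.
_net = simple_cnn(input_shape=(32, 32, 2))
print(_net(tf.zeros((1, 32, 32, 2))).shape)  # expected: (1, 32, 32, 2)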
"""
Differentiable image sampling
References:
1. https://github.com/tensorflow/models/blob/master/research/transformer/spatial_transformer.py
2. Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. https://arxiv.org/pdf/1506.02025.pdf
3. *Spatial* Transformer Networks by Kushagra Bhatnagar https://link.medium.com/0b2OrmqVO5
"""
@tf.function
def grid_sample(moving, grid):
"""Given a moving image and a sampling grid as input, computes the
transformed image by sampling the moving image at locations given by
the grid.
Currently, only 2-D images, i.e., 4-D inputs are supported.
Parameters
----------
moving : tf.Tensor, shape (N, H, W, C)
The moving image.
grid : tf.Tensor, shape (N, H, W, C)
A tensor of sampling points (x, y). The x and y values should be
normalized to [-1.0, 1.0] range.
Returns
-------
moved : tf.Tensor, shape (N, H, W, C)
The transformed image.
Notes
-----
Let M be the moving image of shape (H, W, C), T be the transformed
image of the same shape and G be the 2-D sampling grid of shape
(H, W, 2). The value of T at a location (x, y) is T[y, x, :] =
M[y', x', :] where [x', y'] = G[y, x, :].
Further, [x', y'] = [x + dx, y + dy] where [dx, dy] are the
displacements outputted by the CNN. When dx and dy are 0, the
sampling grid G is a regular grid and the transformed image is the
same as the moving image.
Since the sampling point (x + dx, y + dy) can be non-integral, the
value M[y', x'] is calculated using bi-linear interpolation.
References
----------
.. [1] `Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. <https://arxiv.org/abs/1506.02025>`_
.. [2] `TensorFlow implementation of spatial transformer networks.
<https://github.com/tensorflow/models/tree/master/research/transformer>`_
.. [3] `Spatial Transformer Networks by Kushagra Bhatnagar
<https://link.medium.com/0b2OrmqVO5>`_
"""
nb, nh, nw, nc = moving.shape
x = grid[..., 0] # shape (N, H, W)
y = grid[..., 1]
x = tf.cast(x, 'float32')
y = tf.cast(y, 'float32')
# Scale x and y from [-1.0, 1.0] to [0, W] and [0, H] respectively.
x = (x + 1.0) * 0.5 * tf.cast(nw, 'float32')
y = (y + 1.0) * 0.5 * tf.cast(nh, 'float32')
y_max = tf.cast(nh - 1, 'int32')
x_max = tf.cast(nw - 1, 'int32')
zero = tf.constant(0, 'int32')
# The value at (x, y) is a weighted average of the values at the
# four nearest integer locations: (x0, y0), (x1, y0), (x0, y1) and
# (x1, y1) where x0 = floor(x), x1 = ceil(x).
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
# Make sure indices are within the boundaries of the image.
x0 = tf.clip_by_value(x0, zero, x_max)
x1 = tf.clip_by_value(x1, zero, x_max)
y0 = tf.clip_by_value(y0, zero, y_max)
y1 = tf.clip_by_value(y1, zero, y_max)
# Collect indices of the four corners.
b = tf.ones_like(x0) * tf.reshape(tf.range(nb), [nb, 1, 1])
idx_a = tf.stack([b, y0, x0], axis=-1) # all top-left corners
idx_b = tf.stack([b, y1, x0], axis=-1) # all bottom-left corners
idx_c = tf.stack([b, y0, x1], axis=-1) # all top-right corners
idx_d = tf.stack([b, y1, x1], axis=-1) # all bottom-right corners
# shape (N, H, W, 3)
# Collect values at the corners.
moving_a = tf.gather_nd(moving, idx_a) # all top-left values
moving_b = tf.gather_nd(moving, idx_b) # all bottom-left values
moving_c = tf.gather_nd(moving, idx_c) # all top-right values
moving_d = tf.gather_nd(moving, idx_d) # all bottom-right values
# shape (N, H, W, C)
x0_f = tf.cast(x0, 'float32')
x1_f = tf.cast(x1, 'float32')
y0_f = tf.cast(y0, 'float32')
y1_f = tf.cast(y1, 'float32')
# Calculate the weights.
wa = tf.expand_dims((x1_f - x) * (y1_f - y), axis=-1)
wb = tf.expand_dims((x1_f - x) * (y - y0_f), axis=-1)
wc = tf.expand_dims((x - x0_f) * (y1_f - y), axis=-1)
wd = tf.expand_dims((x - x0_f) * (y - y0_f), axis=-1)
# Calculate the weighted sum.
moved = tf.add_n([wa * moving_a, wb * moving_b, wc * moving_c,
wd * moving_d])
return moved
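# Illustrative check (added; not in the original notebook): an all-zeros grid
# samples every output pixel from the centre of the moving image, so the
# result is a constant image equal to moving[:, H//2, W//2, :].
_mv = tf.random.uniform((1, 8, 8, 1))
_ctr = grid_sample(_mv, tf.zeros((1, 8, 8, 2)))
print(float(tf.reduce_max(tf.abs(_ctr - _mv[:, 4:5, 4:5, :]))))  # expected: 0.0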
@tf.function
def regular_grid(shape):
"""Returns a batch of 2-D regular grids.
Currently, only 2-D regular grids are supported.
Parameters
----------
shape : sequence of ints, shape (3, )
The desired regular grid shape of the form (N, H, W).
Returns
-------
grid : tf.Tensor, shape (N, H, W, 2)
A batch of 2-D regular grids, values normalized to [-1.0, 1.0]
range.
Notes
-----
Sampling using the regular grid is an identity transformation, i.e.,
it results in the same input and output images.
References
----------
.. [1] `NumPy, "numpy.meshgrid"
<https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html>`_
.. [2] `NumPy, "numpy.indices"
<https://numpy.org/doc/stable/reference/generated/numpy.indices.html>`_
"""
nb, nh, nw = shape
x = tf.linspace(-1.0, 1.0, nw) # shape (W, )
y = tf.linspace(-1.0, 1.0, nh) # shape (H, )
X, Y = tf.meshgrid(x, y) # shape (H, W), both X and Y
grid = tf.stack([X, Y], axis=-1)
grid = tf.expand_dims(grid, axis=0) # shape (1, H, W, 2)
# Repeat the grids along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
grid = tf.tile(grid, multiples)
return grid
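# Illustrative check (added; not in the original notebook): the regular grid
# spans [-1, 1] in both x and y and carries one (x, y) pair per output pixel.
_g = regular_grid([1, 4, 4])
print(_g.shape)                                             # expected: (1, 4, 4, 2)
print(float(tf.reduce_min(_g)), float(tf.reduce_max(_g)))   # expected: -1.0 1.0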
"""Training and testing functions"""
@tf.function
def train_step(model, moving, static, criterion, optimizer):
"""A generic training procedure for one iteration.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
optimizer
        An optimizer.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Define the GradientTape context for automatic differentiation.
with tf.GradientTape() as tape:
# Get the deformation field
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
# Compute gradients.
grads = tape.gradient(loss, model.trainable_variables)
# Update the trainable parameters.
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
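# Optional smoke test (illustrative, not in the original notebook); uncomment
# to run one training step on random tensors and print the scalar loss. Using a
# fresh model/optimizer here makes the tf.function retrace once, which is fine
# for a one-off check.
# _tmp_model = simple_cnn(input_shape=(32, 32, 2))
# _tmp_opt = tf.keras.optimizers.SGD(learning_rate=0.01)
# _tmp_loss = train_step(_tmp_model, tf.random.uniform((2, 32, 32, 1)),
#                        tf.random.uniform((1, 32, 32, 1)), ncc_loss, _tmp_opt)
# print(float(_tmp_loss))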
@tf.function
def test_step(model, moving, static, criterion):
"""A generic testing procedure.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation field.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
return loss
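# Optional check (illustrative, not in the original notebook); uncomment to
# evaluate one batch of random tensors without updating any weights.
# _eval_model = simple_cnn(input_shape=(32, 32, 2))
# _eval_loss = test_step(_eval_model, tf.random.uniform((2, 32, 32, 1)),
#                        tf.random.uniform((1, 32, 32, 1)), ncc_loss)
# print(float(_eval_loss))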
"""Data loading"""
def load_data(label=2):
"""Loads the MNIST dataset and preprocesses it: scales to [0.0, 1.0]
range, resizes the images from (28, 28) to (32, 32) and filters the
dataset to keep images of just one class.
Parameters
----------
label : {2, 0, 1, 3, 4, 5, 6, 7, 8, 9}, default 2
The class of images to train and test on.
Returns
-------
    (x_train, x_test) : tuple of tf.Tensors
        Training and testing images, resized to (32, 32) and scaled to [0, 1].
"""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Discard digits which are not equal to label.
ids_train = np.where(y_train == label)
ids_test = np.where(y_test == label)
x_train = x_train[ids_train]
x_test = x_test[ids_test]
# Scale the image to [0, 1] range.
x_train = x_train.astype(np.float32) / 255.0
x_test = x_test.astype(np.float32) / 255.0
# Add the channel dim at the end. (N, H, W) --> (N, H, W, 1)
x_train = x_train[..., None]
x_test = x_test[..., None]
# Resize images from (28, 28) to (32, 32).
x_train = tf.image.resize(x_train, (32, 32))
x_test = tf.image.resize(x_test, (32, 32))
return x_train, x_test
"""Sample results"""
def plot_images(model, moving, static):
"""Visualize some images after training.
Parameters
----------
model
The trained model.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation fields for the batch.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grids.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving images using the new sampling grids.
moved = grid_sample(moving, grid_new)
# Convert the tensors to 8-bit images.
moved = moved.numpy().squeeze(axis=-1) * 255.0
moved = moved.astype(np.uint8)
moving = moving.numpy().squeeze(axis=-1) * 255.0
moving = moving.astype(np.uint8)
static = static.numpy().squeeze(axis=-1) * 255.0
static = static.astype(np.uint8)
# Plot images.
fig = plt.figure(figsize=(3 * 1.7, nb * 1.7))
titles_list = ['Static', 'Moved', 'Moving']
images_list = [static, moved, moving]
for i in range(nb):
for j in range(3):
ax = fig.add_subplot(nb, 3, i * 3 + j + 1)
if i == 0:
ax.set_title(titles_list[j], fontsize=20)
ax.set_axis_off()
ax.imshow(images_list[j][i], cmap='gray')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
class Args():
batch_size = 8
epochs = 50
lr = 0.004
label = 7 # which digit images to train on?
num_samples = 5 # number of sample results to show
save_model = False
args = Args()
# Load preprocessed training and testing data.
x_train, x_test = load_data(label=args.label)
# Randomly select an image as the static image from the test set.
# idx = np.random.randint(x_test.shape[0])
# static = tf.expand_dims(x_test[idx], axis=0)
static = tf.expand_dims(x_test[0], axis=0)
# Select some images from the test set to show sample results.
# ids = tf.constant(np.random.choice(x_test.shape[0], replace=False,
# size=args.num_samples))
# x_sample = tf.gather(x_test, ids)
x_sample = x_test[:args.num_samples]
# Shuffle and batch the dataset.
from_tensor_slices = tf.data.Dataset.from_tensor_slices
# x_train = from_tensor_slices(x_train).shuffle(10000).batch(args.batch_size)
# x_test = from_tensor_slices(x_test).shuffle(10000).batch(args.batch_size)
x_train = from_tensor_slices(x_train).batch(args.batch_size)
x_test = from_tensor_slices(x_test).batch(args.batch_size)
# Create a model instance.
model = simple_cnn(input_shape=(32, 32, 2))
model.summary()
tf.keras.utils.plot_model(model, show_shapes=True, dpi=50)
# Select optimizer and loss function.
optimizer = tf.keras.optimizers.SGD(learning_rate=args.lr)
    criterion = ncc_loss  # or mse_loss
# Define the metrics to track training and testing losses.
m_train = tf.keras.metrics.Mean(name='loss_train')
m_test = tf.keras.metrics.Mean(name='loss_test')
# Train and evaluate the model.
for epoch in range(args.epochs):
m_train.reset_states()
m_test.reset_states()
for i, moving in enumerate(x_train):
loss_train = train_step(model, moving, static, criterion,
optimizer)
m_train.update_state(loss_train)
for i, moving in enumerate(x_test):
loss_test = test_step(model, moving, static, criterion)
m_test.update_state(loss_test)
print('Epoch: %3d/%d\tTrain Loss: %.6f\tTest Loss: %.6f'
% (epoch + 1, args.epochs, m_train.result(), m_test.result()))
print('\n')
# Show sample results.
plot_images(model, x_sample, static)
# Save the trained model.
if args.save_model:
model.save('saved_models/simple_cnn')
|
regular_grid
|
Returns a batch of 2-D regular grids.
Currently, only 2-D regular grids are supported.
Parameters
----------
shape : sequence of ints, shape (3, )
The desired regular grid shape of the form (N, H, W).
Returns
-------
grid : tf.Tensor, shape (N, H, W, 2)
A batch of 2-D regular grids, values normalized to [-1.0, 1.0]
range.
Notes
-----
Sampling using the regular grid is an identity transformation, i.e.,
it results in the same input and output images.
References
----------
.. [1] `NumPy, "numpy.meshgrid"
<https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html>`_
.. [2] `NumPy, "numpy.indices"
<https://numpy.org/doc/stable/reference/generated/numpy.indices.html>`_
|
# MASKED: regular_grid function (lines 286-328)
|
@tf.function
def regular_grid(shape):
"""Returns a batch of 2-D regular grids.
Currently, only 2-D regular grids are supported.
Parameters
----------
shape : sequence of ints, shape (3, )
The desired regular grid shape of the form (N, H, W).
Returns
-------
grid : tf.Tensor, shape (N, H, W, 2)
A batch of 2-D regular grids, values normalized to [-1.0, 1.0]
range.
Notes
-----
Sampling using the regular grid is an identity transformation, i.e.,
it results in the same input and output images.
References
----------
.. [1] `NumPy, "numpy.meshgrid"
<https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html>`_
.. [2] `NumPy, "numpy.indices"
<https://numpy.org/doc/stable/reference/generated/numpy.indices.html>`_
"""
nb, nh, nw = shape
x = tf.linspace(-1.0, 1.0, nw) # shape (W, )
y = tf.linspace(-1.0, 1.0, nh) # shape (H, )
X, Y = tf.meshgrid(x, y) # shape (H, W), both X and Y
grid = tf.stack([X, Y], axis=-1)
grid = tf.expand_dims(grid, axis=0) # shape (1, H, W, 2)
# Repeat the grids along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
grid = tf.tile(grid, multiples)
return grid
| 286 | 328 |
|
test_step
|
A generic testing procedure.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
|
# -*- coding: utf-8 -*-
"""poc.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fTzz1aT2sb8oAXRO1-dr6O_IR6dof36e
A simple example for deep-learning-based non-rigid image registration
with the MNIST dataset.
**README:** If the below error occurs, run the whole notebook again (Ctrl+F9).
```
ValueError: tf.function-decorated function tried to create variables on non-first call.
```
"""
import tensorflow as tf
import tensorflow.keras.layers as layers
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
print(tf.keras.backend.image_data_format())
"""Loss functions"""
@tf.function
def mse_loss(static, moving):
"""Computes the mean squared error (MSE) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Mean squared error between the static and the moving images,
averaged over the batch.
"""
loss = tf.reduce_mean(tf.square(moving - static)) # shape ()
return loss
@tf.function
def ncc_loss(static, moving):
"""Computes the normalized cross-correlation (NCC) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Normalized cross-correlation loss between the static and the
moving images, averaged over the batch. Range is [-1.0, 1.0].
The best value is -1 (perfect match) and the worst is 1.
References
----------
.. [1] `Wikipedia entry for the Cross-correlation
<https://en.wikipedia.org/wiki/Cross-correlation>`_
"""
eps = tf.constant(1e-9, 'float32')
static_mean = tf.reduce_mean(static, axis=[1, 2], keepdims=True)
moving_mean = tf.reduce_mean(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_std = tf.math.reduce_std(static, axis=[1, 2], keepdims=True)
moving_std = tf.math.reduce_std(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_hat = (static - static_mean)/(static_std + eps)
moving_hat = (moving - moving_mean)/(moving_std + eps)
# shape (N, H, W, C)
ncc = tf.reduce_mean(static_hat * moving_hat) # shape ()
loss = -ncc
return loss
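# --- Hypothetical sanity check (not part of the original notebook): for two
# identical images the normalized cross-correlation is 1, so ncc_loss returns
# approximately -1.0, its best possible value.
_demo_img = tf.random.uniform((1, 32, 32, 1))
print(float(ncc_loss(_demo_img, _demo_img)))  # ~ -1.0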
"""Define the model """
def simple_cnn(input_shape=(32, 32, 2)):
"""Creates a 2-D convolutional encoder-decoder network.
Parameters
----------
input_shape : sequence of ints, optional
Input data shape of the form (H, W, C). Default is (32, 32, 2).
Returns
-------
model
An instance of Keras' Model class.
Notes
-----
Given a concatenated pair of static and moving images as input, the
CNN computes a dense displacement field that is used to warp the
moving image to match with the static image.
The number of channels in the output (displacement field) is equal
to the dimensionality of the input data. For 3-D volumes, it is 3,
and for 2-D images, it is 2. The first channel comprises
displacement in the x-direction and the second comprises
displacement in the y-direction.
"""
out_channels = 2
inputs = layers.Input(shape=input_shape)
# encoder
x = layers.Conv2D(32, kernel_size=3, strides=2, padding='same',
activation='relu')(inputs) # 32 --> 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.MaxPool2D(pool_size=2)(x) # 16 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.MaxPool2D(pool_size=2)(x) # 8 --> 4
x = layers.Conv2D(128, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 4
x = layers.BatchNormalization()(x) # 4
# decoder
x = layers.Conv2DTranspose(64, kernel_size=2, strides=2,
padding='same')(x) # 4 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2DTranspose(32, kernel_size=2, strides=2,
padding='same')(x) # 8 --> 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2DTranspose(16, kernel_size=2, strides=2,
padding='same')(x) # 16 --> 32
x = layers.Conv2D(16, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 32
x = layers.BatchNormalization()(x) # 32
x = layers.Conv2D(out_channels, kernel_size=1, strides=1,
padding='same')(x) # 32
# Create the model.
model = tf.keras.Model(inputs, x, name='simple_cnn')
return model
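# --- Hypothetical shape check (not part of the original notebook): the network
# maps a concatenated (moving, static) pair of shape (N, 32, 32, 2) to a dense
# displacement field of the same spatial size with 2 channels (dx, dy).
_demo_model = simple_cnn()
print(_demo_model.output_shape)  # (None, 32, 32, 2)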
"""
Differentiable image sampling
References:
1. https://github.com/tensorflow/models/blob/master/research/transformer/spatial_transformer.py
2. Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. https://arxiv.org/pdf/1506.02025.pdf
3. *Spatial* Transformer Networks by Kushagra Bhatnagar https://link.medium.com/0b2OrmqVO5
"""
@tf.function
def grid_sample(moving, grid):
"""Given a moving image and a sampling grid as input, computes the
transformed image by sampling the moving image at locations given by
the grid.
Currently, only 2-D images, i.e., 4-D inputs are supported.
Parameters
----------
moving : tf.Tensor, shape (N, H, W, C)
The moving image.
grid : tf.Tensor, shape (N, H, W, 2)
A tensor of sampling points (x, y). The x and y values should be
normalized to [-1.0, 1.0] range.
Returns
-------
moved : tf.Tensor, shape (N, H, W, C)
The transformed image.
Notes
-----
Let M be the moving image of shape (H, W, C), T be the transformed
image of the same shape and G be the 2-D sampling grid of shape
(H, W, 2). The value of T at a location (x, y) is T[y, x, :] =
M[y', x', :] where [x', y'] = G[y, x, :].
Further, [x', y'] = [x + dx, y + dy] where [dx, dy] are the
displacements outputted by the CNN. When dx and dy are 0, the
sampling grid G is a regular grid and the transformed image is the
same as the moving image.
Since the sampling point (x + dx, y + dy) can be non-integral, the
value M[y', x'] is calculated using bi-linear interpolation.
References
----------
.. [1] `Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. <https://arxiv.org/abs/1506.02025>`_
.. [2] `TensorFlow implementation of spatial transformer networks.
<https://github.com/tensorflow/models/tree/master/research/transformer>`_
.. [3] `Spatial Transformer Networks by Kushagra Bhatnagar
<https://link.medium.com/0b2OrmqVO5>`_
"""
nb, nh, nw, nc = moving.shape
x = grid[..., 0] # shape (N, H, W)
y = grid[..., 1]
x = tf.cast(x, 'float32')
y = tf.cast(y, 'float32')
# Scale x and y from [-1.0, 1.0] to [0, W] and [0, H] respectively.
x = (x + 1.0) * 0.5 * tf.cast(nw, 'float32')
y = (y + 1.0) * 0.5 * tf.cast(nh, 'float32')
y_max = tf.cast(nh - 1, 'int32')
x_max = tf.cast(nw - 1, 'int32')
zero = tf.constant(0, 'int32')
# The value at (x, y) is a weighted average of the values at the
# four nearest integer locations: (x0, y0), (x1, y0), (x0, y1) and
# (x1, y1) where x0 = floor(x), x1 = ceil(x).
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
# Make sure indices are within the boundaries of the image.
x0 = tf.clip_by_value(x0, zero, x_max)
x1 = tf.clip_by_value(x1, zero, x_max)
y0 = tf.clip_by_value(y0, zero, y_max)
y1 = tf.clip_by_value(y1, zero, y_max)
# Collect indices of the four corners.
b = tf.ones_like(x0) * tf.reshape(tf.range(nb), [nb, 1, 1])
idx_a = tf.stack([b, y0, x0], axis=-1) # all top-left corners
idx_b = tf.stack([b, y1, x0], axis=-1) # all bottom-left corners
idx_c = tf.stack([b, y0, x1], axis=-1) # all top-right corners
idx_d = tf.stack([b, y1, x1], axis=-1) # all bottom-right corners
# shape (N, H, W, 3)
# Collect values at the corners.
moving_a = tf.gather_nd(moving, idx_a) # all top-left values
moving_b = tf.gather_nd(moving, idx_b) # all bottom-left values
moving_c = tf.gather_nd(moving, idx_c) # all top-right values
moving_d = tf.gather_nd(moving, idx_d) # all bottom-right values
# shape (N, H, W, C)
x0_f = tf.cast(x0, 'float32')
x1_f = tf.cast(x1, 'float32')
y0_f = tf.cast(y0, 'float32')
y1_f = tf.cast(y1, 'float32')
# Calculate the weights.
wa = tf.expand_dims((x1_f - x) * (y1_f - y), axis=-1)
wb = tf.expand_dims((x1_f - x) * (y - y0_f), axis=-1)
wc = tf.expand_dims((x - x0_f) * (y1_f - y), axis=-1)
wd = tf.expand_dims((x - x0_f) * (y - y0_f), axis=-1)
# Calculate the weighted sum.
moved = tf.add_n([wa * moving_a, wb * moving_b, wc * moving_c,
wd * moving_d])
return moved
@tf.function
def regular_grid(shape):
"""Returns a batch of 2-D regular grids.
Currently, only 2-D regular grids are supported.
Parameters
----------
shape : sequence of ints, shape (3, )
The desired regular grid shape of the form (N, H, W).
Returns
-------
grid : tf.Tensor, shape (N, H, W, 2)
A batch of 2-D regular grids, values normalized to [-1.0, 1.0]
range.
Notes
-----
Sampling using the regular grid is an identity transformation, i.e.,
it results in the same input and output images.
References
----------
.. [1] `NumPy, "numpy.meshgrid"
<https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html>`_
.. [2] `NumPy, "numpy.indices"
<https://numpy.org/doc/stable/reference/generated/numpy.indices.html>`_
"""
nb, nh, nw = shape
x = tf.linspace(-1.0, 1.0, nw) # shape (W, )
y = tf.linspace(-1.0, 1.0, nh) # shape (H, )
X, Y = tf.meshgrid(x, y) # shape (H, W), both X and Y
grid = tf.stack([X, Y], axis=-1)
grid = tf.expand_dims(grid, axis=0) # shape (1, H, W, 2)
# Repeat the grids along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
grid = tf.tile(grid, multiples)
return grid
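# --- Hypothetical sanity check (not part of the original notebook): a regular
# grid has two channels (normalized x and y coordinates), and sampling with it
# keeps the image shape; with zero displacement it is (approximately) an
# identity transform up to bilinear interpolation at the borders.
_demo_moving = tf.random.uniform((2, 32, 32, 1))
_demo_grid = regular_grid([2, 32, 32])        # shape (2, 32, 32, 2)
_demo_moved = grid_sample(_demo_moving, _demo_grid)
print(_demo_grid.shape, _demo_moved.shape)    # (2, 32, 32, 2) (2, 32, 32, 1)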
"""Training and testing functions"""
@tf.function
def train_step(model, moving, static, criterion, optimizer):
"""A generic training procedure for one iteration.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
optimizer
An optimizer.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Define the GradientTape context for automatic differentiation.
with tf.GradientTape() as tape:
# Get the deformation field
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
# Compute gradients.
grads = tape.gradient(loss, model.trainable_variables)
# Update the trainable parameters.
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
# MASKED: test_step function (lines 383-423)
"""Data loading"""
def load_data(label=2):
"""Loads the MNIST dataset and preprocesses it: scales to [0.0, 1.0]
range, resizes the images from (28, 28) to (32, 32) and filters the
dataset to keep images of just one class.
Parameters
----------
label : {2, 0, 1, 3, 4, 5, 6, 7, 8, 9}, default 2
The class of images to train and test on.
Returns
-------
(x_train, x_test) : tuple of ndarrays
NumPy arrays of training and testing images.
"""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Discard digits which are not equal to label.
ids_train = np.where(y_train == label)
ids_test = np.where(y_test == label)
x_train = x_train[ids_train]
x_test = x_test[ids_test]
# Scale the image to [0, 1] range.
x_train = x_train.astype(np.float32) / 255.0
x_test = x_test.astype(np.float32) / 255.0
# Add the channel dim at the end. (N, H, W) --> (N, H, W, 1)
x_train = x_train[..., None]
x_test = x_test[..., None]
# Resize images from (28, 28) to (32, 32).
x_train = tf.image.resize(x_train, (32, 32))
x_test = tf.image.resize(x_test, (32, 32))
return x_train, x_test
"""Sample results"""
def plot_images(model, moving, static):
"""Visualize some images after training.
Parameters
----------
model
The trained model.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation fields for the batch.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grids.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving images using the new sampling grids.
moved = grid_sample(moving, grid_new)
# Convert the tensors to 8-bit images.
moved = moved.numpy().squeeze(axis=-1) * 255.0
moved = moved.astype(np.uint8)
moving = moving.numpy().squeeze(axis=-1) * 255.0
moving = moving.astype(np.uint8)
static = static.numpy().squeeze(axis=-1) * 255.0
static = static.astype(np.uint8)
# Plot images.
fig = plt.figure(figsize=(3 * 1.7, nb * 1.7))
titles_list = ['Static', 'Moved', 'Moving']
images_list = [static, moved, moving]
for i in range(nb):
for j in range(3):
ax = fig.add_subplot(nb, 3, i * 3 + j + 1)
if i == 0:
ax.set_title(titles_list[j], fontsize=20)
ax.set_axis_off()
ax.imshow(images_list[j][i], cmap='gray')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
class Args():
batch_size = 8
epochs = 50
lr = 0.004
label = 7 # which digit images to train on?
num_samples = 5 # number of sample results to show
save_model = False
args = Args()
# Load preprocessed training and testing data.
x_train, x_test = load_data(label=args.label)
# Randomly select an image as the static image from the test set.
# idx = np.random.randint(x_test.shape[0])
# static = tf.expand_dims(x_test[idx], axis=0)
static = tf.expand_dims(x_test[0], axis=0)
# Select some images from the test set to show sample results.
# ids = tf.constant(np.random.choice(x_test.shape[0], replace=False,
# size=args.num_samples))
# x_sample = tf.gather(x_test, ids)
x_sample = x_test[:args.num_samples]
# Shuffle and batch the dataset.
from_tensor_slices = tf.data.Dataset.from_tensor_slices
# x_train = from_tensor_slices(x_train).shuffle(10000).batch(args.batch_size)
# x_test = from_tensor_slices(x_test).shuffle(10000).batch(args.batch_size)
x_train = from_tensor_slices(x_train).batch(args.batch_size)
x_test = from_tensor_slices(x_test).batch(args.batch_size)
# Create a model instance.
model = simple_cnn(input_shape=(32, 32, 2))
model.summary()
tf.keras.utils.plot_model(model, show_shapes=True, dpi=50)
# Select optimizer and loss function.
optimizer = tf.keras.optimizers.SGD(learning_rate=args.lr)
criterion = ncc_loss # normalized_cross_correlation_loss() # or mse_loss
# Define the metrics to track training and testing losses.
m_train = tf.keras.metrics.Mean(name='loss_train')
m_test = tf.keras.metrics.Mean(name='loss_test')
# Train and evaluate the model.
for epoch in range(args.epochs):
m_train.reset_states()
m_test.reset_states()
for i, moving in enumerate(x_train):
loss_train = train_step(model, moving, static, criterion,
optimizer)
m_train.update_state(loss_train)
for i, moving in enumerate(x_test):
loss_test = test_step(model, moving, static, criterion)
m_test.update_state(loss_test)
print('Epoch: %3d/%d\tTrain Loss: %.6f\tTest Loss: %.6f'
% (epoch + 1, args.epochs, m_train.result(), m_test.result()))
print('\n')
# Show sample results.
plot_images(model, x_sample, static)
# Save the trained model.
if args.save_model:
model.save('saved_models/simple_cnn')
|
@tf.function
def test_step(model, moving, static, criterion):
"""A generic testing procedure.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation field.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
return loss
| 383 | 423 |
# -*- coding: utf-8 -*-
"""poc.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fTzz1aT2sb8oAXRO1-dr6O_IR6dof36e
A simple example for deep-learning-based non-rigid image registration
with the MNIST dataset.
**README:** If the below error occurs, run the whole notebook again (Ctrl+F9).
```
ValueError: tf.function-decorated function tried to create variables on non-first call.
```
"""
import tensorflow as tf
import tensorflow.keras.layers as layers
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
print(tf.keras.backend.image_data_format())
"""Loss functions"""
@tf.function
def mse_loss(static, moving):
"""Computes the mean squared error (MSE) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Mean squared error between the static and the moving images,
averaged over the batch.
"""
loss = tf.reduce_mean(tf.square(moving - static)) # shape ()
return loss
@tf.function
def ncc_loss(static, moving):
"""Computes the normalized cross-correlation (NCC) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Normalized cross-correlation loss between the static and the
moving images, averaged over the batch. Range is [-1.0, 1.0].
The best value is -1 (perfect match) and the worst is 1.
References
----------
.. [1] `Wikipedia entry for the Cross-correlation
<https://en.wikipedia.org/wiki/Cross-correlation>`_
"""
eps = tf.constant(1e-9, 'float32')
static_mean = tf.reduce_mean(static, axis=[1, 2], keepdims=True)
moving_mean = tf.reduce_mean(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_std = tf.math.reduce_std(static, axis=[1, 2], keepdims=True)
moving_std = tf.math.reduce_std(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_hat = (static - static_mean)/(static_std + eps)
moving_hat = (moving - moving_mean)/(moving_std + eps)
# shape (N, H, W, C)
ncc = tf.reduce_mean(static_hat * moving_hat) # shape ()
loss = -ncc
return loss
"""Define the model """
def simple_cnn(input_shape=(32, 32, 2)):
"""Creates a 2-D convolutional encoder-decoder network.
Parameters
----------
input_shape : sequence of ints, optional
Input data shape of the form (H, W, C). Default is (32, 32, 2).
Returns
-------
model
An instance of Keras' Model class.
Notes
-----
Given a concatenated pair of static and moving images as input, the
CNN computes a dense displacement field that is used to warp the
moving image to match with the static image.
The number of channels in the output (displacement field) is equal
to the dimensionality of the input data. For 3-D volumes, it is 3,
and for 2-D images, it is 2. The first channel comprises
displacement in the x-direction and the second comprises
displacement in the y-direction.
"""
out_channels = 2
inputs = layers.Input(shape=input_shape)
# encoder
x = layers.Conv2D(32, kernel_size=3, strides=2, padding='same',
activation='relu')(inputs) # 32 --> 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.MaxPool2D(pool_size=2)(x) # 16 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.MaxPool2D(pool_size=2)(x) # 8 --> 4
x = layers.Conv2D(128, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 4
x = layers.BatchNormalization()(x) # 4
# decoder
x = layers.Conv2DTranspose(64, kernel_size=2, strides=2,
padding='same')(x) # 4 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2DTranspose(32, kernel_size=2, strides=2,
padding='same')(x) # 8 --> 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2DTranspose(16, kernel_size=2, strides=2,
padding='same')(x) # 16 --> 32
x = layers.Conv2D(16, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 32
x = layers.BatchNormalization()(x) # 32
x = layers.Conv2D(out_channels, kernel_size=1, strides=1,
padding='same')(x) # 32
# Create the model.
model = tf.keras.Model(inputs, x, name='simple_cnn')
return model
"""
Differentiable image sampling
References:
1. https://github.com/tensorflow/models/blob/master/research/transformer/spatial_transformer.py
2. Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. https://arxiv.org/pdf/1506.02025.pdf
3. *Spatial* Transformer Networks by Kushagra Bhatnagar https://link.medium.com/0b2OrmqVO5
"""
@tf.function
def grid_sample(moving, grid):
"""Given a moving image and a sampling grid as input, computes the
transformed image by sampling the moving image at locations given by
the grid.
Currently, only 2-D images, i.e., 4-D inputs are supported.
Parameters
----------
moving : tf.Tensor, shape (N, H, W, C)
The moving image.
grid : tf.Tensor, shape (N, H, W, 2)
A tensor of sampling points (x, y). The x and y values should be
normalized to [-1.0, 1.0] range.
Returns
-------
moved : tf.Tensor, shape (N, H, W, C)
The transformed image.
Notes
-----
Let M be the moving image of shape (H, W, C), T be the transformed
image of the same shape and G be the 2-D sampling grid of shape
(H, W, 2). The value of T at a location (x, y) is T[y, x, :] =
M[y', x', :] where [x', y'] = G[y, x, :].
Further, [x', y'] = [x + dx, y + dy] where [dx, dy] are the
displacements outputted by the CNN. When dx and dy are 0, the
sampling grid G is a regular grid and the transformed image is the
same as the moving image.
Since the sampling point (x + dx, y + dy) can be non-integral, the
value M[y', x'] is calculated using bi-linear interpolation.
References
----------
.. [1] `Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. <https://arxiv.org/abs/1506.02025>`_
.. [2] `TensorFlow implementation of spatial transformer networks.
<https://github.com/tensorflow/models/tree/master/research/transformer>`_
.. [3] `Spatial Transformer Networks by Kushagra Bhatnagar
<https://link.medium.com/0b2OrmqVO5>`_
"""
nb, nh, nw, nc = moving.shape
x = grid[..., 0] # shape (N, H, W)
y = grid[..., 1]
x = tf.cast(x, 'float32')
y = tf.cast(y, 'float32')
# Scale x and y from [-1.0, 1.0] to [0, W] and [0, H] respectively.
x = (x + 1.0) * 0.5 * tf.cast(nw, 'float32')
y = (y + 1.0) * 0.5 * tf.cast(nh, 'float32')
y_max = tf.cast(nh - 1, 'int32')
x_max = tf.cast(nw - 1, 'int32')
zero = tf.constant(0, 'int32')
# The value at (x, y) is a weighted average of the values at the
# four nearest integer locations: (x0, y0), (x1, y0), (x0, y1) and
# (x1, y1) where x0 = floor(x), x1 = ceil(x).
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
# Make sure indices are within the boundaries of the image.
x0 = tf.clip_by_value(x0, zero, x_max)
x1 = tf.clip_by_value(x1, zero, x_max)
y0 = tf.clip_by_value(y0, zero, y_max)
y1 = tf.clip_by_value(y1, zero, y_max)
# Collect indices of the four corners.
b = tf.ones_like(x0) * tf.reshape(tf.range(nb), [nb, 1, 1])
idx_a = tf.stack([b, y0, x0], axis=-1) # all top-left corners
idx_b = tf.stack([b, y1, x0], axis=-1) # all bottom-left corners
idx_c = tf.stack([b, y0, x1], axis=-1) # all top-right corners
idx_d = tf.stack([b, y1, x1], axis=-1) # all bottom-right corners
# shape (N, H, W, 3)
# Collect values at the corners.
moving_a = tf.gather_nd(moving, idx_a) # all top-left values
moving_b = tf.gather_nd(moving, idx_b) # all bottom-left values
moving_c = tf.gather_nd(moving, idx_c) # all top-right values
moving_d = tf.gather_nd(moving, idx_d) # all bottom-right values
# shape (N, H, W, C)
x0_f = tf.cast(x0, 'float32')
x1_f = tf.cast(x1, 'float32')
y0_f = tf.cast(y0, 'float32')
y1_f = tf.cast(y1, 'float32')
# Calculate the weights.
wa = tf.expand_dims((x1_f - x) * (y1_f - y), axis=-1)
wb = tf.expand_dims((x1_f - x) * (y - y0_f), axis=-1)
wc = tf.expand_dims((x - x0_f) * (y1_f - y), axis=-1)
wd = tf.expand_dims((x - x0_f) * (y - y0_f), axis=-1)
# Calculate the weighted sum.
moved = tf.add_n([wa * moving_a, wb * moving_b, wc * moving_c,
wd * moving_d])
return moved
@tf.function
def regular_grid(shape):
"""Returns a batch of 2-D regular grids.
Currently, only 2-D regular grids are supported.
Parameters
----------
shape : sequence of ints, shape (3, )
The desired regular grid shape of the form (N, H, W).
Returns
-------
grid : tf.Tensor, shape (N, H, W, 2)
A batch of 2-D regular grids, values normalized to [-1.0, 1.0]
range.
Notes
-----
Sampling using the regular grid is an identity transformation, i.e.,
it results in the same input and output images.
References
----------
.. [1] `NumPy, "numpy.meshgrid"
<https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html>`_
.. [2] `NumPy, "numpy.indices"
<https://numpy.org/doc/stable/reference/generated/numpy.indices.html>`_
"""
nb, nh, nw = shape
x = tf.linspace(-1.0, 1.0, nw) # shape (W, )
y = tf.linspace(-1.0, 1.0, nh) # shape (H, )
X, Y = tf.meshgrid(x, y) # shape (H, W), both X and Y
grid = tf.stack([X, Y], axis=-1)
grid = tf.expand_dims(grid, axis=0) # shape (1, H, W, 2)
# Repeat the grids along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
grid = tf.tile(grid, multiples)
return grid
"""Training and testing functions"""
@tf.function
def train_step(model, moving, static, criterion, optimizer):
"""A generic training procedure for one iteration.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
optimizer
An optimizer.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Define the GradientTape context for automatic differentiation.
with tf.GradientTape() as tape:
# Get the deformation field
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
# Compute gradients.
grads = tape.gradient(loss, model.trainable_variables)
# Update the trainable parameters.
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
@tf.function
def test_step(model, moving, static, criterion):
"""A generic testing procedure.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation field.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
return loss
"""Data loading"""
def load_data(label=2):
"""Loads the MNIST dataset and preprocesses it: scales to [0.0, 1.0]
range, resizes the images from (28, 28) to (32, 32) and filters the
dataset to keep images of just one class.
Parameters
----------
label : {2, 0, 1, 3, 4, 5, 6, 7, 8, 9}, default 2
The class of images to train and test on.
Returns
-------
(x_train, x_test) : tuple of ndarrays
NumPy arrays of training and testing images.
"""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Discard digits which are not equal to label.
ids_train = np.where(y_train == label)
ids_test = np.where(y_test == label)
x_train = x_train[ids_train]
x_test = x_test[ids_test]
# Scale the image to [0, 1] range.
x_train = x_train.astype(np.float32) / 255.0
x_test = x_test.astype(np.float32) / 255.0
# Add the channel dim at the end. (N, H, W) --> (N, H, W, 1)
x_train = x_train[..., None]
x_test = x_test[..., None]
# Resize images from (28, 28) to (32, 32).
x_train = tf.image.resize(x_train, (32, 32))
x_test = tf.image.resize(x_test, (32, 32))
return x_train, x_test
"""Sample results"""
def plot_images(model, moving, static):
"""Visualize some images after training.
Parameters
----------
model
The trained model.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation fields for the batch.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grids.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving images using the new sampling grids.
moved = grid_sample(moving, grid_new)
# Convert the tensors to 8-bit images.
moved = moved.numpy().squeeze(axis=-1) * 255.0
moved = moved.astype(np.uint8)
moving = moving.numpy().squeeze(axis=-1) * 255.0
moving = moving.astype(np.uint8)
static = static.numpy().squeeze(axis=-1) * 255.0
static = static.astype(np.uint8)
# Plot images.
fig = plt.figure(figsize=(3 * 1.7, nb * 1.7))
titles_list = ['Static', 'Moved', 'Moving']
images_list = [static, moved, moving]
for i in range(nb):
for j in range(3):
ax = fig.add_subplot(nb, 3, i * 3 + j + 1)
if i == 0:
ax.set_title(titles_list[j], fontsize=20)
ax.set_axis_off()
ax.imshow(images_list[j][i], cmap='gray')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
class Args():
batch_size = 8
epochs = 50
lr = 0.004
label = 7 # which digit images to train on?
num_samples = 5 # number of sample results to show
save_model = False
args = Args()
# Load preprocessed training and testing data.
x_train, x_test = load_data(label=args.label)
# Randomly select an image as the static image from the test set.
# idx = np.random.randint(x_test.shape[0])
# static = tf.expand_dims(x_test[idx], axis=0)
static = tf.expand_dims(x_test[0], axis=0)
# Select some images from the test set to show sample results.
# ids = tf.constant(np.random.choice(x_test.shape[0], replace=False,
# size=args.num_samples))
# x_sample = tf.gather(x_test, ids)
x_sample = x_test[:args.num_samples]
# Shuffle and batch the dataset.
from_tensor_slices = tf.data.Dataset.from_tensor_slices
# x_train = from_tensor_slices(x_train).shuffle(10000).batch(args.batch_size)
# x_test = from_tensor_slices(x_test).shuffle(10000).batch(args.batch_size)
x_train = from_tensor_slices(x_train).batch(args.batch_size)
x_test = from_tensor_slices(x_test).batch(args.batch_size)
# Create a model instance.
model = simple_cnn(input_shape=(32, 32, 2))
model.summary()
tf.keras.utils.plot_model(model, show_shapes=True, dpi=50)
# Select optimizer and loss function.
optimizer = tf.keras.optimizers.SGD(learning_rate=args.lr)
criterion = ncc_loss # normalized_cross_correlation_loss() # or mse_loss
# Define the metrics to track training and testing losses.
m_train = tf.keras.metrics.Mean(name='loss_train')
m_test = tf.keras.metrics.Mean(name='loss_test')
# Train and evaluate the model.
for epoch in range(args.epochs):
m_train.reset_states()
m_test.reset_states()
for i, moving in enumerate(x_train):
loss_train = train_step(model, moving, static, criterion,
optimizer)
m_train.update_state(loss_train)
for i, moving in enumerate(x_test):
loss_test = test_step(model, moving, static, criterion)
m_test.update_state(loss_test)
print('Epoch: %3d/%d\tTrain Loss: %.6f\tTest Loss: %.6f'
% (epoch + 1, args.epochs, m_train.result(), m_test.result()))
print('\n')
# Show sample results.
plot_images(model, x_sample, static)
# Save the trained model.
if args.save_model:
model.save('saved_models/simple_cnn')
|
create_process_chain_entry
|
Create an Actinia process chain entry that queries the metadata of the given
raster, vector or space-time raster dataset, using r.info, v.info or t.info.
:param input_name: The name of the input raster, vector or space-time raster dataset
:return: An Actinia process chain description
|
# -*- coding: utf-8 -*-
from random import randint
import json
from .base import analyse_process_graph, PROCESS_DICT, PROCESS_DESCRIPTION_DICT
from openeo_grass_gis_driver.process_schemas import Parameter, ProcessDescription, ReturnValue
from .actinia_interface import ActiniaInterface
__license__ = "Apache License, Version 2.0"
__author__ = "Sören Gebbert"
__copyright__ = "Copyright 2018, Sören Gebbert, mundialis"
__maintainer__ = "Soeren Gebbert"
__email__ = "[email protected]"
PROCESS_NAME = "get_data"
def create_process_description():
p_imagery = Parameter(description="Any openEO process object that returns raster datasets "
"or space-time raster dataset",
schema={"type": "object", "format": "eodata"},
required=False)
p_data_id = Parameter(description="The identifier of a single raster-, vector- or space-time raster dataset",
schema={"type": "string",
"examples": ["nc_spm_08.landsat.raster.lsat5_1987_10",
"nc_spm_08.PERMANENT.vector.lakes",
"ECAD.PERMANENT.strds.temperature_1950_2017_yearly"]},
required=True)
rv = ReturnValue(description="Processed EO data.",
schema={"type": "object", "format": "eodata"})
simple_example = {
"process_id": PROCESS_NAME,
"data_id": "nc_spm_08.PERMANENT.vector.lakes",
}
raster_vector_example = {
"process_id": PROCESS_NAME,
"data_id": "nc_spm_08.landsat.raster.lsat5_1987_10",
"imagery": {
"process_id": "get_data",
"data_id": "nc_spm_08.PERMANENT.vector.lakes"
}
}
strds_example = {
"process_id": PROCESS_NAME,
"data_id": "ECAD.PERMANENT.strds.temperature_1950_2017_yearly",
"imagery": {
"process_id": "get_data",
"data_id": "ECAD.PERMANENT.strds.precipitation_1950_2017_yearly"
}
}
examples = dict(simple_example=simple_example,
raster_vector_example=raster_vector_example,
strds_example=strds_example)
pd = ProcessDescription(name=PROCESS_NAME,
description="This process returns a raster-, a vector- or a space-time raster "
"datasets that is available in the /collections endpoint.",
summary="Returns a single dataset that is available in "
"the /collections endpoint for processing",
parameters={"imagery": p_imagery, "data_id": p_data_id},
returns=rv,
examples=examples)
return json.loads(pd.to_json())
PROCESS_DESCRIPTION_DICT[PROCESS_NAME] = create_process_description()
# MASKED: create_process_chain_entry function (lines 73-109)
def get_process_list(process):
"""Analyse the process description and return the Actinia process chain and the name of the processing result
:param process: The process description
:return: (output_names, actinia_process_list)
"""
input_names, process_list = analyse_process_graph(process)
output_names = []
# First analyse the data entry
if "data_id" not in process:
raise Exception("Process %s requires parameter <data_id>" % PROCESS_NAME)
output_names.append(process["data_id"])
pc = create_process_chain_entry(input_name=process["data_id"])
process_list.append(pc)
# Then add the input to the output
for input_name in input_names:
# Create the output name based on the input name and method
output_name = input_name
output_names.append(output_name)
return output_names, process_list
PROCESS_DICT[PROCESS_NAME] = get_process_list
|
def create_process_chain_entry(input_name):
"""Create a Actinia process description that uses t.rast.series to create the minimum
value of the time series.
:param input_time_series: The input time series name
:param output_map: The name of the output map
:return: A Actinia process chain description
"""
location, mapset, datatype, layer_name = ActiniaInterface.layer_def_to_components(input_name)
input_name = layer_name
if mapset is not None:
input_name = layer_name + "@" + mapset
rn = randint(0, 1000000)
pc = {}
if datatype == "raster":
pc = {"id": "r_info_%i" % rn,
"module": "r.info",
"inputs": [{"param": "map", "value": input_name}, ],
"flags": "g"}
elif datatype == "vector":
pc = {"id": "v_info_%i" % rn,
"module": "v.info",
"inputs": [{"param": "map", "value": input_name}, ],
"flags": "g"}
elif datatype == "strds":
pc = {"id": "t_info_%i" % rn,
"module": "t.info",
"inputs": [{"param": "input", "value": input_name}, ],
"flags": "g"}
else:
raise Exception("Unsupported datatype")
return pc
| 73 | 109 |
# -*- coding: utf-8 -*-
from random import randint
import json
from .base import analyse_process_graph, PROCESS_DICT, PROCESS_DESCRIPTION_DICT
from openeo_grass_gis_driver.process_schemas import Parameter, ProcessDescription, ReturnValue
from .actinia_interface import ActiniaInterface
__license__ = "Apache License, Version 2.0"
__author__ = "Sören Gebbert"
__copyright__ = "Copyright 2018, Sören Gebbert, mundialis"
__maintainer__ = "Soeren Gebbert"
__email__ = "[email protected]"
PROCESS_NAME = "get_data"
def create_process_description():
p_imagery = Parameter(description="Any openEO process object that returns raster datasets "
"or space-time raster dataset",
schema={"type": "object", "format": "eodata"},
required=False)
p_data_id = Parameter(description="The identifier of a single raster-, vector- or space-time raster dataset",
schema={"type": "string",
"examples": ["nc_spm_08.landsat.raster.lsat5_1987_10",
"nc_spm_08.PERMANENT.vector.lakes",
"ECAD.PERMANENT.strds.temperature_1950_2017_yearly"]},
required=True)
rv = ReturnValue(description="Processed EO data.",
schema={"type": "object", "format": "eodata"})
simple_example = {
"process_id": PROCESS_NAME,
"data_id": "nc_spm_08.PERMANENT.vector.lakes",
}
raster_vector_example = {
"process_id": PROCESS_NAME,
"data_id": "nc_spm_08.landsat.raster.lsat5_1987_10",
"imagery": {
"process_id": "get_data",
"data_id": "nc_spm_08.PERMANENT.vector.lakes"
}
}
strds_example = {
"process_id": PROCESS_NAME,
"data_id": "ECAD.PERMANENT.strds.temperature_1950_2017_yearly",
"imagery": {
"process_id": "get_data",
"data_id": "ECAD.PERMANENT.strds.precipitation_1950_2017_yearly"
}
}
examples = dict(simple_example=simple_example,
raster_vector_example=raster_vector_example,
strds_example=strds_example)
pd = ProcessDescription(name=PROCESS_NAME,
description="This process returns a raster-, a vector- or a space-time raster "
"datasets that is available in the /collections endpoint.",
summary="Returns a single dataset that is available in "
"the /collections endpoint for processing",
parameters={"imagery": p_imagery, "data_id": p_data_id},
returns=rv,
examples=examples)
return json.loads(pd.to_json())
PROCESS_DESCRIPTION_DICT[PROCESS_NAME] = create_process_description()
def create_process_chain_entry(input_name):
"""Create a Actinia process description that uses t.rast.series to create the minimum
value of the time series.
:param input_time_series: The input time series name
:param output_map: The name of the output map
:return: A Actinia process chain description
"""
location, mapset, datatype, layer_name = ActiniaInterface.layer_def_to_components(input_name)
input_name = layer_name
if mapset is not None:
input_name = layer_name + "@" + mapset
rn = randint(0, 1000000)
pc = {}
if datatype == "raster":
pc = {"id": "r_info_%i" % rn,
"module": "r.info",
"inputs": [{"param": "map", "value": input_name}, ],
"flags": "g"}
elif datatype == "vector":
pc = {"id": "v_info_%i" % rn,
"module": "v.info",
"inputs": [{"param": "map", "value": input_name}, ],
"flags": "g"}
elif datatype == "strds":
pc = {"id": "t_info_%i" % rn,
"module": "t.info",
"inputs": [{"param": "input", "value": input_name}, ],
"flags": "g"}
else:
raise Exception("Unsupported datatype")
return pc
def get_process_list(process):
"""Analyse the process description and return the Actinia process chain and the name of the processing result
:param process: The process description
:return: (output_names, actinia_process_list)
"""
input_names, process_list = analyse_process_graph(process)
output_names = []
# First analyse the data entry
if "data_id" not in process:
raise Exception("Process %s requires parameter <data_id>" % PROCESS_NAME)
output_names.append(process["data_id"])
pc = create_process_chain_entry(input_name=process["data_id"])
process_list.append(pc)
# Then add the input to the output
for input_name in input_names:
# Create the output name based on the input name and method
output_name = input_name
output_names.append(output_name)
return output_names, process_list
PROCESS_DICT[PROCESS_NAME] = get_process_list
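# --- Hypothetical usage sketch (not part of the original module): assuming that
# ActiniaInterface.layer_def_to_components() splits a data_id of the form
# "<location>.<mapset>.<datatype>.<layer>", a raster id is translated into an
# r.info entry roughly like this:
#
#   pc = create_process_chain_entry("nc_spm_08.landsat.raster.lsat5_1987_10")
#   # -> {"id": "r_info_<random>", "module": "r.info",
#   #     "inputs": [{"param": "map", "value": "lsat5_1987_10@landsat"}],
#   #     "flags": "g"}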
|
__init__
|
Create a FeatureImportanceSummarySaver Hook.
This hook creates scalar summaries representing feature importance
for each feature column during training.
Args:
model_dir: model base output directory.
every_n_steps: frequency, in number of steps, for logging summaries.
Raises:
ValueError: If one of the arguments is invalid.
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hooks for use with GTFlow Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.session_run_hook import SessionRunArgs
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.training.summary_io import SummaryWriterCache
class FeatureImportanceSummarySaver(session_run_hook.SessionRunHook):
"""Hook to save feature importance summaries."""
# MASKED: __init__ function (lines 35-52)
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use FeatureImportanceSummarySaver.")
graph = ops.get_default_graph()
self._feature_names_tensor = graph.get_tensor_by_name(
"gbdt/feature_names:0")
self._feature_usage_counts_tensor = graph.get_tensor_by_name(
"gbdt/feature_usage_counts:0")
self._feature_gains_tensor = graph.get_tensor_by_name(
"gbdt/feature_gains:0")
def before_run(self, run_context):
del run_context # Unused by feature importance summary saver hook.
requests = {
"global_step": self._global_step_tensor,
"feature_names": self._feature_names_tensor,
"feature_usage_counts": self._feature_usage_counts_tensor,
"feature_gains": self._feature_gains_tensor
}
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
del run_context # Unused by feature importance summary saver hook.
# Read result tensors.
global_step = run_values.results["global_step"]
feature_names = run_values.results["feature_names"]
feature_usage_counts = run_values.results["feature_usage_counts"]
feature_gains = run_values.results["feature_gains"]
# Ensure summaries are logged at desired frequency
if (self._last_triggered_step is not None and
global_step < self._last_triggered_step + self._every_n_steps):
return
# Validate tensors.
if (len(feature_names) != len(feature_usage_counts) or
len(feature_names) != len(feature_gains)):
raise RuntimeError(
"Feature names and importance measures have inconsistent lengths.")
# Compute total usage.
total_usage_count = 0.0
for usage_count in feature_usage_counts:
total_usage_count += usage_count
usage_count_norm = 1.0 / total_usage_count if total_usage_count else 1.0
# Compute total gain.
total_gain = 0.0
for gain in feature_gains:
total_gain += gain
gain_norm = 1.0 / total_gain if total_gain else 1.0
# Output summary for each feature.
self._last_triggered_step = global_step
for (name, usage_count, gain) in zip(feature_names, feature_usage_counts,
feature_gains):
output_dir = os.path.join(self._model_dir, name.decode("utf-8"))
summary_writer = SummaryWriterCache.get(output_dir)
usage_count_summary = Summary(value=[
Summary.Value(
tag="feature_importance/usage_counts", simple_value=usage_count)
])
usage_fraction_summary = Summary(value=[
Summary.Value(
tag="feature_importance/usage_fraction",
simple_value=usage_count * usage_count_norm)
])
summary_writer.add_summary(usage_count_summary, global_step)
summary_writer.add_summary(usage_fraction_summary, global_step)
gains_summary = Summary(value=[
Summary.Value(tag="feature_importance/gains", simple_value=gain)
])
gains_fraction_summary = Summary(value=[
Summary.Value(
tag="feature_importance/gains_fraction",
simple_value=gain * gain_norm)
])
summary_writer.add_summary(gains_summary, global_step)
summary_writer.add_summary(gains_fraction_summary, global_step)
class FeedFnHook(session_run_hook.SessionRunHook):
"""Runs feed_fn and sets the feed_dict accordingly."""
def __init__(self, feed_fn):
self.feed_fn = feed_fn
def before_run(self, run_context):
del run_context # unused by FeedFnHook.
return session_run_hook.SessionRunArgs(fetches=None, feed_dict=self.feed_fn())
class StopAfterNTrees(session_run_hook.SessionRunHook):
"""Stop training after building N full trees."""
def __init__(self, n, num_attempted_trees_tensor, num_finalized_trees_tensor):
self._num_trees = n
# num_attempted_trees_tensor and num_finalized_trees_tensor are both
# tensors.
self._num_attempted_trees_tensor = num_attempted_trees_tensor
self._num_finalized_trees_tensor = num_finalized_trees_tensor
def before_run(self, run_context):
del run_context # unused by StopAfterNTrees.
return session_run_hook.SessionRunArgs({
"num_attempted_trees": self._num_attempted_trees_tensor,
"num_finalized_trees": self._num_finalized_trees_tensor,
})
def after_run(self, run_context, run_values):
num_attempted_trees = run_values.results["num_attempted_trees"]
num_finalized_trees = run_values.results["num_finalized_trees"]
assert num_attempted_trees is not None
assert num_finalized_trees is not None
# Stop when the required number of finalized trees is reached, or when we
# try enough times to build a tree but keep failing.
if (num_finalized_trees >= self._num_trees or
num_attempted_trees > 2 * self._num_trees):
logging.info("Requesting stop since we have reached %d trees.",
num_finalized_trees)
run_context.request_stop()
|
def __init__(self, model_dir, every_n_steps=1):
"""Create a FeatureImportanceSummarySaver Hook.
This hook creates scalar summaries representing feature importance
for each feature column during training.
Args:
model_dir: model base output directory.
every_n_steps: frequency, in number of steps, for logging summaries.
Raises:
ValueError: If one of the arguments is invalid.
"""
if model_dir is None:
raise ValueError("model dir must be specified.")
self._model_dir = model_dir
self._every_n_steps = every_n_steps
self._last_triggered_step = None
| 35 | 52 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hooks for use with GTFlow Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.session_run_hook import SessionRunArgs
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.training.summary_io import SummaryWriterCache
class FeatureImportanceSummarySaver(session_run_hook.SessionRunHook):
"""Hook to save feature importance summaries."""
def __init__(self, model_dir, every_n_steps=1):
"""Create a FeatureImportanceSummarySaver Hook.
This hook creates scalar summaries representing feature importance
for each feature column during training.
Args:
model_dir: model base output directory.
every_n_steps: frequency, in number of steps, for logging summaries.
Raises:
ValueError: If one of the arguments is invalid.
"""
if model_dir is None:
raise ValueError("model dir must be specified.")
self._model_dir = model_dir
self._every_n_steps = every_n_steps
self._last_triggered_step = None
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use FeatureImportanceSummarySaver.")
graph = ops.get_default_graph()
self._feature_names_tensor = graph.get_tensor_by_name(
"gbdt/feature_names:0")
self._feature_usage_counts_tensor = graph.get_tensor_by_name(
"gbdt/feature_usage_counts:0")
self._feature_gains_tensor = graph.get_tensor_by_name(
"gbdt/feature_gains:0")
def before_run(self, run_context):
del run_context # Unused by feature importance summary saver hook.
requests = {
"global_step": self._global_step_tensor,
"feature_names": self._feature_names_tensor,
"feature_usage_counts": self._feature_usage_counts_tensor,
"feature_gains": self._feature_gains_tensor
}
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
del run_context # Unused by feature importance summary saver hook.
# Read result tensors.
global_step = run_values.results["global_step"]
feature_names = run_values.results["feature_names"]
feature_usage_counts = run_values.results["feature_usage_counts"]
feature_gains = run_values.results["feature_gains"]
# Ensure summaries are logged at desired frequency
if (self._last_triggered_step is not None and
global_step < self._last_triggered_step + self._every_n_steps):
return
# Validate tensors.
if (len(feature_names) != len(feature_usage_counts) or
len(feature_names) != len(feature_gains)):
raise RuntimeError(
"Feature names and importance measures have inconsistent lengths.")
# Compute total usage.
total_usage_count = 0.0
for usage_count in feature_usage_counts:
total_usage_count += usage_count
usage_count_norm = 1.0 / total_usage_count if total_usage_count else 1.0
# Compute total gain.
total_gain = 0.0
for gain in feature_gains:
total_gain += gain
gain_norm = 1.0 / total_gain if total_gain else 1.0
# Output summary for each feature.
self._last_triggered_step = global_step
for (name, usage_count, gain) in zip(feature_names, feature_usage_counts,
feature_gains):
output_dir = os.path.join(self._model_dir, name.decode("utf-8"))
summary_writer = SummaryWriterCache.get(output_dir)
usage_count_summary = Summary(value=[
Summary.Value(
tag="feature_importance/usage_counts", simple_value=usage_count)
])
usage_fraction_summary = Summary(value=[
Summary.Value(
tag="feature_importance/usage_fraction",
simple_value=usage_count * usage_count_norm)
])
summary_writer.add_summary(usage_count_summary, global_step)
summary_writer.add_summary(usage_fraction_summary, global_step)
gains_summary = Summary(value=[
Summary.Value(tag="feature_importance/gains", simple_value=gain)
])
gains_fraction_summary = Summary(value=[
Summary.Value(
tag="feature_importance/gains_fraction",
simple_value=gain * gain_norm)
])
summary_writer.add_summary(gains_summary, global_step)
summary_writer.add_summary(gains_fraction_summary, global_step)
class FeedFnHook(session_run_hook.SessionRunHook):
"""Runs feed_fn and sets the feed_dict accordingly."""
def __init__(self, feed_fn):
self.feed_fn = feed_fn
def before_run(self, run_context):
del run_context # unused by FeedFnHook.
return session_run_hook.SessionRunArgs(fetches=None, feed_dict=self.feed_fn())
class StopAfterNTrees(session_run_hook.SessionRunHook):
"""Stop training after building N full trees."""
def __init__(self, n, num_attempted_trees_tensor, num_finalized_trees_tensor):
self._num_trees = n
# num_attempted_trees_tensor and num_finalized_trees_tensor are both
# tensors.
self._num_attempted_trees_tensor = num_attempted_trees_tensor
self._num_finalized_trees_tensor = num_finalized_trees_tensor
def before_run(self, run_context):
del run_context # unused by StopAfterNTrees.
return session_run_hook.SessionRunArgs({
"num_attempted_trees": self._num_attempted_trees_tensor,
"num_finalized_trees": self._num_finalized_trees_tensor,
})
def after_run(self, run_context, run_values):
num_attempted_trees = run_values.results["num_attempted_trees"]
num_finalized_trees = run_values.results["num_finalized_trees"]
assert num_attempted_trees is not None
assert num_finalized_trees is not None
# Stop when the required number of finalized trees is reached, or when we
# try enough times to build a tree but keep failing.
if (num_finalized_trees >= self._num_trees or
num_attempted_trees > 2 * self._num_trees):
logging.info("Requesting stop since we have reached %d trees.",
num_finalized_trees)
run_context.request_stop()
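# --- Hypothetical usage sketch (not part of this module): the hook is built with
# the estimator's model directory and passed wherever SessionRunHooks are
# accepted, e.g. the monitors argument of a contrib.learn/GTFlow estimator:
#
#   hook = FeatureImportanceSummarySaver(model_dir="/tmp/gbdt_model",
#                                        every_n_steps=100)
#   estimator.fit(input_fn=train_input_fn, steps=1000, monitors=[hook])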
|
with_parent
|
Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
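A minimal usage sketch (assuming a typical ``User``/``Address`` mapping in which
``User.addresses`` is a configured :func:`.relationship`; the names are
illustrative only):

    user = session.query(User).get(5)
    # select only the Address rows that belong to this particular User instance
    addresses = session.query(Address).with_parent(user, User.addresses).all()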
|
# orm/query.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = from_obj
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check: if this query was generated from a
# subquery of itself, i.e. _from_selectable(), apply adaptation
# to all SQL constructs.
adapters.append(
(
getattr(self, '_orm_only_from_obj_alias', orm_only),
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity or \
self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _mapper_zero_or_none(self):
if self._primary_entity:
return self._primary_entity.mapper
else:
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _no_select_modifiers(self, meth):
if not self._enable_assertions:
return
for attr, methname, notset in (
('_limit', 'limit()', None),
('_offset', 'offset()', None),
('_order_by', 'order_by()', False),
('_group_by', 'group_by()', False),
('_distinct', 'distinct()', False),
):
if getattr(self, attr) is not notset:
raise sa_exc.InvalidRequestError(
"Can't call Query.%s() when %s has been called" %
(meth, methname)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
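# Illustrative sketch (not part of the original source): inspecting the SQL
# behind a Query via the .statement accessor. `session` and the mapped class
# `User` are assumptions for demonstration only.
#
#     q = session.query(User).filter(User.name == 'ed')
#     print(q.statement)
#     # renders roughly as:
#     # SELECT users.id, users.name FROM users WHERE users.name = :name_1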
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
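# Illustrative sketch (not part of the original source): embedding the result
# of subquery() in an enclosing Query. Mapped classes `User` and `Address`
# (with Address.user_id referencing User.id) are assumptions for demonstration.
#
#     from sqlalchemy import func
#     addr_count = session.query(
#             Address.user_id,
#             func.count(Address.id).label('cnt')).\
#         group_by(Address.user_id).\
#         subquery()
#     q = session.query(User, addr_count.c.cnt).\
#         outerjoin(addr_count, User.id == addr_count.c.user_id)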
def cte(self, name=None, recursive=False):
"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\\
filter(Part.part=="our part").\\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.SelectBase.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
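# Illustrative sketch (not part of the original source): label() / as_scalar()
# turn the Query into a correlated scalar subquery usable inside another
# query's column list. `User`/`Address` are assumed mapped classes.
#
#     from sqlalchemy import func
#     addr_count = session.query(func.count(Address.id)).\
#         filter(Address.user_id == User.id).\
#         label('address_count')     # use .as_scalar() for an unlabeled form
#     q = session.query(User.name, addr_count)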
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`.Query.yield_per`.
"""
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
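# Illustrative sketch (not part of the original source): eagerly including
# joined-inheritance subclass columns. `Employee` with subclasses `Engineer`
# and `Manager` are assumed mapped classes for demonstration only.
#
#     q = session.query(Employee).with_polymorphic([Engineer, Manager])
#     q = session.query(Employee).with_polymorphic('*')   # all subclasses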
@_generative()
def yield_per(self, count):
"""Yield only ``count`` rows at a time.
The purpose of this method is when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to declare
very large areas of memory which is both time consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`.Query.yield_per` method **is not compatible with most
eager loading schemes, including subqueryload and joinedload with
collections**. For this reason, it may be helpful to disable
eager loads, either unconditionally with
:meth:`.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by
:mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect
which will stream results using server side cursors
instead of pre-buffering all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True})
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary_key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return loading.load_on_ident(self, key)
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
"""
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
# MASKED: with_parent function (lines 924-956)
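# Illustrative usage sketch (not part of the original source) for the masked
# with_parent() method documented earlier in this entry: select child rows
# belonging to an already-loaded parent instance. `User`/`Address` with a
# `User.addresses` relationship are assumptions for demonstration only.
#
#     some_user = session.query(User).get(5)
#     q = session.query(Address).with_parent(some_user)  # relationship inferred
#     q = session.query(Address).with_parent(some_user, 'addresses')  # explicit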
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
"""
self.session = session
def from_self(self, *entities):
"""return a Query that selects from this Query's
SELECT statement.
\*entities - optional list of entities which will replace
those being selected.
"""
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
if entities:
q._set_entities(entities)
return q
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes',
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
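# Illustrative sketch (not part of the original source): values() iterates
# plain tuples for the given columns; value() returns a single scalar.
# `session`, `User` and `from sqlalchemy import func` are assumed here.
#
#     for user_id, name in session.query(User).values(User.id, User.name):
#         print(user_id, name)
#     max_id = session.query(User).value(func.max(User.id))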
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading` for reference
documentation.
"""
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
/etc.
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a Postgresql backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
@_generative()
def params(self, *args, **kwargs):
"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. versionchanged:: 0.7.5
Multiple criteria joined by AND.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
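# Self-contained, runnable sketch (not part of the original source) contrasting
# filter() and filter_by() against an in-memory SQLite database. The `User`
# model and sample rows below are assumptions introduced for demonstration.
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([User(name='ed'), User(name='wendy'), User(name='fred')])
session.commit()
# filter() takes SQL expression criteria; filter_by() takes keyword equalities
# against the primary entity (or the most recent join target).
assert session.query(User).filter(User.name.like('%ed%')).count() == 2
assert session.query(User).filter_by(name='wendy').one().name == 'wendy'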
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, an existing ORDER BY setting on the Query
object can be entirely cancelled by passing ``False``
as the value - use this before calling methods where
an ORDER BY is invalid.
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
def join(self, *props, **kwargs):
"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\\
join(User.orders).\\
join(Order.items).\\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\\
join(User.addresses).\\
join(a_alias, User.addresses).\\
filter(Address.email_address=='[email protected]').\\
filter(a_alias.email_address=='[email protected]')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaptation**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\\
where(Address.email_address.endswith("@bar.com")).\\
alias()
q = session.query(User).\\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
:func:`~sqlalchemy.orm.relationship` -driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\\
filter(Address.email_address == '[email protected]').\\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\\
join("children", aliased=True).\\
filter(Node.name == 'child 1').\\
join("children", aliased=True, from_joinpoint=True).\\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1').\\
reset_joinpoint().\\
filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
A special two-argument calling form of the form ``target, onclause``
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`.Query.outerjoin` method were called. This
flag is here to maintain consistency with the same flag as accepted
by :meth:`.FromClause.join` and other Core constructs.
.. versionadded:: 1.0.0
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`~.Query.join` is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs.keys()))
return self._join(props,
outerjoin=isouter, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
for arg1 in util.to_list(keys):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, '_of_type', None)
else:
of_type = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is None:
raise sa_exc.InvalidRequestError(
"Don't know how to join from %s; please use "
"select_from() to establish the left "
"entity/selectable of this join" % self._entities[0])
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj) and \
sql_util.selectables_overlap(
from_obj, r_info.selectable):
overlap = True
break
if (overlap or not create_aliases) and \
l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(
right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
        # which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
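        An illustrative sketch (``session`` and ``Node`` stand in for a
        configured :class:`.Session` and a self-referential mapped class;
        both are assumptions, not part of the original example)::
            q = session.query(Node).\\
                join('children', aliased=True).\\
                filter(Node.name == 'grandchild 1').\\
                reset_joinpoint().\\
                filter(Node.name == 'child 1')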
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""apply LIMIT/OFFSET to the ``Query`` based on a "
"range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
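        For example, an illustrative sketch (``session`` and ``User`` are
        assumed to be a configured :class:`.Session` and a mapped class)::
            # first ten User rows, ordered by primary key
            first_ten = session.query(User).order_by(User.id).limit(10).all()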
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
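        For example, a simple pagination sketch (``session`` and ``User``
        are assumed to be a configured :class:`.Session` and a mapped
        class; the page numbers are illustrative)::
            page, page_size = 2, 20
            users = session.query(User).order_by(User.id).\\
                limit(page_size).offset(page * page_size).all()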
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
        :param \*criterion: optional column expressions. When present,
        the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
        construct.
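        For example, an illustrative sketch of the Postgresql-specific form
        (``session`` and ``User`` are assumed placeholders)::
            # renders SELECT DISTINCT ON (users.name) ... on Postgresql
            q = session.query(User).distinct(User.name).order_by(User.name)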
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
        not using any commas. This is particularly useful for MySQL keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`~.expression.text`
or :func:`~.expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this :class:`.Query`.
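        For example, an illustrative sketch using a textual statement
        (``session`` and ``User`` are assumed to be a configured
        :class:`.Session` and a mapped class)::
            from sqlalchemy import text
            stmt = text("SELECT * FROM users WHERE users.name = :name")
            users = session.query(User).from_statement(stmt).\\
                params(name='ed').all()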
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling ``first()`` results in an execution of the underlying query.
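        For example, an illustrative sketch (``session`` and ``User`` are
        assumed placeholders)::
            # returns a User instance, or None if no row matched
            user = session.query(User).filter(User.name == 'ed').first()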
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that does not return object
identities.
Note that an entity query, that is, one which selects one or
more mapped classes as opposed to individual column attributes,
may ultimately represent many rows but only one row of
unique entity or entities - this is a successful result for one().
Calling ``one()`` results in an execution of the underlying query.
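        A typical usage sketch (``session`` and ``User`` are assumed to be
        a configured :class:`.Session` and a mapped class)::
            from sqlalchemy.orm.exc import NoResultFound
            try:
                user = session.query(User).filter(User.id == 5).one()
            except NoResultFound:
                user = None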
.. versionchanged:: 0.6
``one()`` fully fetches all results instead of applying
any kind of limit, so that the "unique"-ing of entities does not
conceal multiple object identities.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._mapper_zero_or_none(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(self, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(ent, 'is_aliased_class', False),
'expr': ent.expr
}
for ent in self._entities
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print u
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
"""
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
.. versionadded:: 0.8.1
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
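        E.g., an illustrative sketch (``session`` and ``User`` are assumed
        to be a configured :class:`.Session` and a mapped class)::
            rows_deleted = session.query(User).\\
                filter(User.name == 'squidward').\\
                delete(synchronize_session=False)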
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised. In that case you probably
want to use the 'fetch' strategy as a fallback.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured
for any foreign key references which require it, otherwise the
database may emit an integrity violation if foreign key references
are being enforced.
After the DELETE, dependent objects in the :class:`.Session` which
were impacted by an ON DELETE may not contain the current
state, or may have been deleted. This issue is resolved once the
:class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`. Accessing an expired object
whose row has been deleted will invoke a SELECT to locate the
row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to act
upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
# TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate'):
"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
            sess.query(User).filter(User.age == 25).\\
                update({User.age: User.age - 10}, synchronize_session='fetch')
            sess.query(User).filter(User.age == 25).\\
                update({"age": User.age - 10}, synchronize_session='evaluate')
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON UPDATE CASCADE is configured for any foreign
key references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the :class:`.Session` which
were impacted by an ON UPDATE CASCADE may not contain the current
state; this issue is resolved once the :class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`.
* The method supports multiple table updates, as
detailed in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and other multiple
table mappings. However, the **join condition of an inheritance
mapper is currently not automatically rendered**.
Care must be taken in any multiple-table update to explicitly
include the joining condition between those tables, even in mappings
where this is normally automatic.
E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of
the ``Engineer`` local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\\
filter(Engineer.id == Employee.id).\\
filter(Employee.name == 'dilbert').\\
update({"engineer_type": "programmer"})
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to act
upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values)
update_op.exec_()
return update_op.rowcount
def _compile_context(self, labels=True):
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
else:
# "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
context.froms = context.froms
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
def __str__(self):
return str(self._compile_context().statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(self, mode):
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
        Note the newer style of using a free standing with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=query._only_load_props,
refresh_state=context.refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
else:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
if self._with_polymorphic:
poly_properties = self.mapper._iterate_polymorphic_properties(
self._with_polymorphic)
else:
poly_properties = self.mapper._polymorphic_properties
for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
value.setup(
context,
self,
self.path,
adapter,
only_load_props=query._only_load_props,
column_collection=context.primary_columns
)
if self._polymorphic_discriminator is not None and \
self._polymorphic_discriminator \
is not self.mapper.polymorphic_on:
if adapter:
pd = adapter.columns[self._polymorphic_discriminator]
else:
pd = self._polymorphic_discriminator
context.primary_columns.append(pd)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(object):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
def __init__(self, name, *exprs, **kw):
"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
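        For example, a sketch of a subclass which returns a dictionary per
        row instead of the default keyed tuple (illustrative only; not part
        of the original source)::
            class DictBundle(Bundle):
                def create_row_processor(self, query, procs, labels):
                    def proc(row):
                        return dict(
                            zip(labels, (p(row) for p in procs))
                        )
                    return proc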
.. seealso::
:ref:`bundles` - includes an example of subclassing.
"""
keyed_tuple = util.lightweight_named_tuple('result', labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[ent.row_processor(query, context, result)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
self._label_name = column.key
column = column._query_clause_element()
else:
self._label_name = getattr(column, 'key', None)
if not isinstance(column, expression.ColumnElement) and \
hasattr(column, '_select_iterable'):
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
elif isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
self.entities = util.OrderedSet(
elem._annotations['parententity']
for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
self.entity_zero = list(self.entities)[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def _resolve_expr_against_query_aliases(self, query, expr, context):
return query._adapt_clause(expr, False, True)
def row_processor(self, query, context, result):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
def __str__(self):
return str(self.column)
class QueryContext(object):
multi_row_eager_loaders = False
adapter = None
froms = ()
for_update = None
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.query = query
self.session = query.session
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
|
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
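        For example, an illustrative sketch relying on the automatic
        property search described above (``session``, ``Address`` and
        ``some_user`` are assumed placeholders)::
            addresses = session.query(Address).\\
                with_parent(some_user).all()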
"""
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
| 924 | 956 |
# orm/query.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = from_obj
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
getattr(self, '_orm_only_from_obj_alias', orm_only),
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity or \
self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _mapper_zero_or_none(self):
if self._primary_entity:
return self._primary_entity.mapper
else:
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _no_select_modifiers(self, meth):
if not self._enable_assertions:
return
for attr, methname, notset in (
('_limit', 'limit()', None),
('_offset', 'offset()', None),
('_order_by', 'order_by()', False),
('_group_by', 'group_by()', False),
('_distinct', 'distinct()', False),
):
if getattr(self, attr) is not notset:
raise sa_exc.InvalidRequestError(
"Can't call Query.%s() when %s has been called" %
(meth, methname)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
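# usage sketch (illustrative only; assumes mapped classes ``User`` and
# ``Address`` that are not defined in this module):
#
#     adalias = session.query(Address.user_id, Address.email_address).\
#         filter(Address.email_address.like('%@example.com')).\
#         subquery()
#     q = session.query(User).join(adalias, User.id == adalias.c.user_id)
#
# the Alias returned by subquery() exposes its columns on ``.c`` and can be
# joined to like any other selectable.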
def cte(self, name=None, recursive=False):
"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\\
filter(Part.part=="our part").\\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.SelectBase.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
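# usage sketch (illustrative only; assumes ``User``/``Address`` mappings and
# ``from sqlalchemy import func``): a labeled, correlated scalar subquery
# embedded in an enclosing query's columns clause:
#
#     address_count = session.query(func.count(Address.id)).\
#         filter(Address.user_id == User.id).\
#         label('address_count')
#     q = session.query(User.name, address_count)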
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`.Query.yield_per`.
"""
self._enable_eagerloads = value
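# usage sketch (illustrative only; assumes a ``User.addresses`` relationship
# and ``from sqlalchemy.orm import subqueryload``): disable eager loads so
# that yield_per() can stream rows safely:
#
#     q = session.query(User).options(subqueryload(User.addresses)).\
#         enable_eagerloads(False).yield_per(500)
#     for user in q:
#         ...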
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
"""Yield only ``count`` rows at a time.
The purpose of this method is when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to allocate
very large areas of memory, which is both time consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`.Query.yield_per` method **is not compatible with most
eager loading schemes, including subqueryload and joinedload with
collections**. For this reason, it may be helpful to disable
eager loads, either unconditionally with
:meth:`.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by
:mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect
which will stream results using server side cursors
instead of pre-buffering all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True})
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary_key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return loading.load_on_ident(self, key)
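# usage sketch (illustrative only; assumes a ``User`` mapping with an integer
# primary key):
#
#     user = session.query(User).get(5)    # identity-map hit emits no SQL
#                                          # if the object is already loaded
#     missing = session.query(User).get(-1)          # None when no row exists
#     vf = session.query(VersionedFoo).get((5, 10))  # composite primary key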
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
"""
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
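# usage sketch (illustrative only; assumes ``User``/``Address`` mappings and
# ``from sqlalchemy import func``): explicitly correlate a scalar subquery
# against the enclosing ``User`` entity:
#
#     address_count = session.query(func.count(Address.id)).\
#         filter(Address.user_id == User.id).\
#         correlate(User).as_scalar()
#     q = session.query(User.name, address_count)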
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
"""
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
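# usage sketch (illustrative only; assumes a ``User.addresses`` relationship):
#
#     jack = session.query(User).filter_by(name='jack').one()
#     q = session.query(Address).with_parent(jack, 'addresses')
#
# the criterion is derived from jack's attribute state rather than from a
# JOIN to the users table.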
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
"""
self.session = session
def from_self(self, *entities):
"""return a Query that selects from this Query's
SELECT statement.
\*entities - optional list of entities which will replace
those being selected.
"""
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
if entities:
q._set_entities(entities)
return q
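# usage sketch (illustrative only; assumes a ``User`` mapping): apply further
# criteria to the rows of an already row-limited query by selecting from it:
#
#     q = session.query(User).order_by(User.id).limit(10).\
#         from_self().filter(User.name.like('%ed%'))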
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes',
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
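# usage sketch (illustrative only; assumes ``User``/``Address`` mappings and
# ``from sqlalchemy import func``):
#
#     for name, email in session.query(User).join(User.addresses).\
#             values(User.name, Address.email_address):
#         ...
#     total = session.query(User).value(func.count(User.id))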
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
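# usage sketch (illustrative only; assumes a ``User`` mapping): each result
# row becomes a tuple of the primary entity plus the added columns:
#
#     q = session.query(User).add_columns(User.id, User.name)
#     for user, user_id, name in q:
#         ...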
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading` for reference
documentation.
"""
return self._options(False, *args)
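# usage sketch (illustrative only; assumes ``User.addresses`` and
# ``User.orders`` relationships):
#
#     from sqlalchemy.orm import joinedload, subqueryload
#     q = session.query(User).options(
#         joinedload(User.addresses),
#         subqueryload(User.orders))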
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
/etc.
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
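# usage sketch (illustrative only; ``ix_users_name`` is a hypothetical index
# name, and the hint syntax shown is MySQL-specific):
#
#     q = session.query(User).with_hint(
#         User, 'USE INDEX (ix_users_name)', dialect_name='mysql')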
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
"""
self._execution_options = self._execution_options.union(kwargs)
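# usage sketch (illustrative only; assumes a ``User`` mapping): request
# server-side cursor streaming on backends that support it:
#
#     q = session.query(User).execution_options(stream_results=True)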
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a Postgresql backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
@_generative()
def params(self, *args, **kwargs):
"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
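# usage sketch (illustrative only; assumes a ``users`` table and
# ``from sqlalchemy import text``):
#
#     q = session.query(User).\
#         filter(text("users.name = :name and users.id > :value")).\
#         params(name='ed', value=5)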
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. versionchanged:: 0.7.5
Multiple criteria joined by AND.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, an existing ORDER BY setting on the Query
object can be entirely cancelled by passing ``False``
as the value - use this before calling methods where
an ORDER BY is invalid.
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
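# usage sketch (illustrative only; assumes a ``User`` mapping):
#
#     q = session.query(User).order_by(User.name.desc(), User.id)
#     q = q.order_by(None)   # cancels ORDER BY, including mapper-level ordering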
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
def join(self, *props, **kwargs):
"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\\
join(User.orders).\\
join(Order.items).\\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\\
join(User.addresses).\\
join(a_alias, User.addresses).\\
filter(Address.email_address=='[email protected]').\\
filter(a_alias.email_address=='[email protected]')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaption**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\\
where(Address.email_address.endswith("@bar.com")).\\
alias()
q = session.query(User).\\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
:meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\\
filter(Address.email_address == '[email protected]').\\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\\
join("children", aliased=True).\\
filter(Node.name == 'child 1').\\
join("children", aliased=True, from_joinpoint=True).\\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1').\\
reset_joinpoint().\\
filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
A special two-argument calling form of the form ``target, onclause``
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`.Query.outerjoin` method were called. This
flag is here to maintain consistency with the same flag as accepted
by :meth:`.FromClause.join` and other Core constructs.
.. versionadded:: 1.0.0
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`~.Query.join` is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs.keys()))
return self._join(props,
outerjoin=isouter, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
for arg1 in util.to_list(keys):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, '_of_type', None)
else:
of_type = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is None:
raise sa_exc.InvalidRequestError(
"Don't know how to join from %s; please use "
"select_from() to establish the left "
"entity/selectable of this join" % self._entities[0])
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj) and \
sql_util.selectables_overlap(
from_obj, r_info.selectable):
overlap = True
break
if (overlap or not create_aliases) and \
l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(
right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
# which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""apply LIMIT/OFFSET to the ``Query`` based on a "
"range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
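# usage sketch (illustrative only; assumes a ``User`` mapping):
#
#     page = session.query(User).order_by(User.id).slice(40, 50)
#
# renders LIMIT 10 OFFSET 40; the Python-style ``q[40:50]`` handled by
# __getitem__() above delegates to slice() but executes immediately and
# returns a list.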
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
:param \*criterion: optional column expressions. When present,
the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
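# usage sketch (illustrative only; assumes a ``User`` mapping):
#
#     q = session.query(User.name).distinct()
#     # Postgresql only - renders DISTINCT ON (users.name):
#     q = session.query(User).distinct(User.name).order_by(User.name, User.id)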
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
not using any commas. In particular is useful for MySQL keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`~.expression.text`
or :func:`~.expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this :class:`.Query`.
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
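# usage sketch (illustrative only; assumes a ``users`` table and
# ``from sqlalchemy import text``):
#
#     q = session.query(User).from_statement(
#         text("SELECT * FROM users WHERE users.name = :name")).\
#         params(name='ed')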
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling ``first()`` results in an execution of the underlying query.
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that does not return object
identities.
Note that an entity query, that is, one which selects one or
more mapped classes as opposed to individual column attributes,
may ultimately represent many rows but only one row of
unique entity or entities - this is a successful result for one().
Calling ``one()`` results in an execution of the underlying query.
.. versionchanged:: 0.6
``one()`` fully fetches all results instead of applying
any kind of limit, so that the "unique"-ing of entities does not
conceal multiple object identities.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._mapper_zero_or_none(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(self, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(ent, 'is_aliased_class', False),
'expr': ent.expr
}
for ent in self._entities
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print u
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
"""
return loading.merge_result(self, iterator, load)
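# Usage sketch (illustrative; assumes ``q`` is a Query with the same column
# structure as the one that produced ``cached_rows``, e.g. rows pulled from
# an external cache):
#
#     merged = q.merge_result(cached_rows, load=False)
#     results = list(merged)   # merged instances now live in q.session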
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
.. versionadded:: 0.8.1
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised. In that case you probably
want to use the 'fetch' strategy as a fallback.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured
for any foreign key references which require it, otherwise the
database may emit an integrity violation if foreign key references
are being enforced.
After the DELETE, dependent objects in the :class:`.Session` which
were impacted by an ON DELETE may not contain the current
state, or may have been deleted. This issue is resolved once the
:class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`. Accessing an expired object
whose row has been deleted will invoke a SELECT to locate the
row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to act
upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
# TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
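# Usage sketch (illustrative; assumes ``sess`` and a mapped ``User``):
#
#     deleted = sess.query(User).\
#         filter(User.name == 'squidward').\
#         delete(synchronize_session=False)
#     sess.commit()   # expires remaining objects so session state stays fresh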
def update(self, values, synchronize_session='evaluate'):
"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\
update({User.age: User.age - 10}, synchronize_session='fetch')
sess.query(User).filter(User.age == 25).\
update({"age": User.age - 10}, synchronize_session='evaluate')
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON UPDATE CASCADE is configured for any foreign
key references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the :class:`.Session` which
were impacted by an ON UPDATE CASCADE may not contain the current
state; this issue is resolved once the :class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`.
* The method supports multiple table updates, as
detailed in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and other multiple
table mappings. However, the **join condition of an inheritance
mapper is currently not automatically rendered**.
Care must be taken in any multiple-table update to explicitly
include the joining condition between those tables, even in mappings
where this is normally automatic.
E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of
the ``Engineer`` local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\\
filter(Engineer.id == Employee.id).\\
filter(Employee.name == 'dilbert').\\
update({"engineer_type": "programmer"})
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to act
upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values)
update_op.exec_()
return update_op.rowcount
def _compile_context(self, labels=True):
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
else:
# "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
context.froms = context.froms
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
def __str__(self):
return str(self._compile_context().statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(self, mode):
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
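# Usage sketch (illustrative; assumes ``sess`` and a mapped ``User``): the
# legacy with_lockmode() strings parsed above map onto with_for_update(),
# e.g.:
#
#     sess.query(User).with_lockmode("update_nowait")
#     # roughly equivalent to:
#     sess.query(User).with_for_update(nowait=True)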
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
Note the newer style of using a free-standing with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=query._only_load_props,
refresh_state=context.refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
else:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
if self._with_polymorphic:
poly_properties = self.mapper._iterate_polymorphic_properties(
self._with_polymorphic)
else:
poly_properties = self.mapper._polymorphic_properties
for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
value.setup(
context,
self,
self.path,
adapter,
only_load_props=query._only_load_props,
column_collection=context.primary_columns
)
if self._polymorphic_discriminator is not None and \
self._polymorphic_discriminator \
is not self.mapper.polymorphic_on:
if adapter:
pd = adapter.columns[self._polymorphic_discriminator]
else:
pd = self._polymorphic_discriminator
context.primary_columns.append(pd)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(object):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
def __init__(self, name, *exprs, **kw):
"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
"""
keyed_tuple = util.lightweight_named_tuple('result', labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
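# Subclassing sketch (illustrative; mirrors the pattern referenced in the
# docstring above): return bundle rows as dictionaries instead of keyed
# tuples by overriding create_row_processor().
#
#     class DictBundle(Bundle):
#         def create_row_processor(self, query, procs, labels):
#             def proc(row):
#                 return dict(zip(labels, (p(row) for p in procs)))
#             return proc
#
#     bn = DictBundle('mybundle', MyClass.x, MyClass.y)
#     rows = session.query(bn).filter(bn.c.x == 5).all()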
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[ent.row_processor(query, context, result)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
self._label_name = column.key
column = column._query_clause_element()
else:
self._label_name = getattr(column, 'key', None)
if not isinstance(column, expression.ColumnElement) and \
hasattr(column, '_select_iterable'):
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
elif isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
self.entities = util.OrderedSet(
elem._annotations['parententity']
for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
self.entity_zero = list(self.entities)[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def _resolve_expr_against_query_aliases(self, query, expr, context):
return query._adapt_clause(expr, False, True)
def row_processor(self, query, context, result):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
def __str__(self):
return str(self.column)
class QueryContext(object):
multi_row_eager_loaders = False
adapter = None
froms = ()
for_update = None
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.query = query
self.session = query.session
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
|
params
|
add values for bind parameters which may have been
specified in filter().
Parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient; however, some parameter dictionaries
contain unicode keys, in which case \**kwargs cannot be used.
|
# orm/query.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = from_obj
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
getattr(self, '_orm_only_from_obj_alias', orm_only),
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity or \
self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _mapper_zero_or_none(self):
if self._primary_entity:
return self._primary_entity.mapper
else:
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _no_select_modifiers(self, meth):
if not self._enable_assertions:
return
for attr, methname, notset in (
('_limit', 'limit()', None),
('_offset', 'offset()', None),
('_order_by', 'order_by()', False),
('_group_by', 'group_by()', False),
('_distinct', 'distinct()', False),
):
if getattr(self, attr) is not notset:
raise sa_exc.InvalidRequestError(
"Can't call Query.%s() when %s has been called" %
(meth, methname)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
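# Usage sketch (illustrative; assumes ``sess`` and a mapped ``User``):
#
#     stmt = sess.query(User).filter(User.name == 'ed').statement
#     print(stmt)    # the equivalent Core SELECT construct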
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
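# Usage sketch (illustrative; assumes ``sess``, mapped ``User`` and
# ``Address`` classes related via ``Address.user_id``, and
# ``from sqlalchemy import func``):
#
#     addr_count = sess.query(Address.user_id,
#                             func.count('*').label('n')).\
#         group_by(Address.user_id).subquery()
#     sess.query(User, addr_count.c.n).\
#         outerjoin(addr_count, User.id == addr_count.c.user_id).all()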
def cte(self, name=None, recursive=False):
"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\\
filter(Part.part=="our part").\\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.SelectBase.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`.Query.yield_per`.
"""
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
"""Yield only ``count`` rows at a time.
The purpose of this method is when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to declare
very large areas of memory which is both time consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`.Query.yield_per` method **is not compatible with most
eager loading schemes, including subqueryload and joinedload with
collections**. For this reason, it may be helpful to disable
eager loads, either unconditionally with
:meth:`.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by
:mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect
which will stream results using server side cursors
instead of pre-buffering all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True})
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return loading.load_on_ident(self, key)
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, all of which are resolved
into expression constructs; plain expression constructs may be
passed as well.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
"""
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
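A minimal usage sketch (``User`` here stands in for any mapped
class and is not defined in this module)::
    q = session.query(User).autoflush(False).\\
        filter(User.name == 'ed')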
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
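A minimal usage sketch (``User`` being a hypothetical mapped class)::
    for user in session.query(User).populate_existing():
        pass  # each instance is expired and refreshed from its row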
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
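A minimal usage sketch, assuming hypothetical ``User`` and ``Address``
classes related via ``User.addresses``::
    someuser = session.query(User).get(5)
    addresses = session.query(Address).\\
        with_parent(someuser, User.addresses).\\
        all()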
"""
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
"""
self.session = session
def from_self(self, *entities):
"""return a Query that selects from this Query's
SELECT statement.
\*entities - optional list of entities which will replace
those being selected.
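A minimal usage sketch (``User`` being a hypothetical mapped class);
the current SELECT becomes a subquery which is then selected from::
    q = session.query(User).filter(User.name.like('e%')).\\
        from_self(User.id)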
"""
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
if entities:
q._set_entities(entities)
return q
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes',
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading` for reference
documentation.
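A minimal usage sketch, assuming a hypothetical ``User`` class with an
``addresses`` relationship and ``joinedload`` imported from
:mod:`sqlalchemy.orm`::
    q = session.query(User).\\
        options(joinedload(User.addresses))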
"""
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class,
etc.
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
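A minimal usage sketch (``User`` being a hypothetical mapped class);
the options are passed through to the underlying connection::
    q = session.query(User).\\
        execution_options(stream_results=True)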
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a Postgresql backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
# MASKED: params function (lines 1260-1278)
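# The ``params`` method is masked above; the following is a hedged
# reconstruction sketched from the public ``Query.params()`` behavior
# (supplying values for bind parameters used in ``filter()``), not a
# verbatim copy of the original implementation.
@_generative()
def params(self, *args, **kwargs):
    """add values for bind parameters which may have been
    specified in filter().
    Parameters may be specified using \**kwargs, or optionally a single
    dictionary as the first positional argument, for cases where the
    parameter names are not valid Python identifiers.
    """
    if len(args) == 1:
        kwargs.update(args[0])
    elif len(args) > 0:
        raise sa_exc.ArgumentError(
            "params() takes zero or one positional argument, "
            "which is a dictionary.")
    self._params = self._params.copy()
    self._params.update(kwargs)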
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. versionchanged:: 0.7.5
Multiple criteria joined by AND.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, an existing ORDER BY setting on the Query
object can be entirely cancelled by passing ``False``
as the value - use this before calling methods where
an ORDER BY is invalid.
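E.g., given a hypothetical mapped class ``User``::
    q = session.query(User).order_by(User.name.desc(), User.id)
    # suppress all ORDER BY, including mapper-configured ordering
    q = session.query(User).order_by(None)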
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
def join(self, *props, **kwargs):
"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\\
join(User.orders).\\
join(Order.items).\\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\\
join(User.addresses).\\
join(a_alias, User.addresses).\\
filter(Address.email_address=='[email protected]').\\
filter(a_alias.email_address=='[email protected]')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaption**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\\
where(Address.email_address.endswith("@bar.com")).\\
alias()
q = session.query(User).\\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
:meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\\
filter(Address.email_address == '[email protected]').\\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\\
join("children", aliased=True).\\
filter(Node.name == 'child 1').\\
join("children", aliased=True, from_joinpoint=True).\\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1').\\
reset_joinpoint().\\
filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
A special two-argument calling form of the form ``target, onclause``
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`.Query.outerjoin` method were called. This
flag is here to maintain consistency with the same flag as accepted
by :meth:`.FromClause.join` and other Core constructs.
.. versionadded:: 1.0.0
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`~.Query.join` is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs.keys()))
return self._join(props,
outerjoin=isouter, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
for arg1 in util.to_list(keys):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, '_of_type', None)
else:
of_type = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is None:
raise sa_exc.InvalidRequestError(
"Don't know how to join from %s; please use "
"select_from() to establish the left "
"entity/selectable of this join" % self._entities[0])
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj) and \
sql_util.selectables_overlap(
from_obj, r_info.selectable):
overlap = True
break
if (overlap or not create_aliases) and \
l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(
right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
# which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""apply LIMIT/OFFSET to the ``Query`` based on a "
"range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
:param \*criterion: optional column expressions. When present,
the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
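E.g., with a hypothetical ``User`` class::
    # plain DISTINCT
    q = session.query(User.name).distinct()
    # DISTINCT ON (users.name), Postgresql only
    q = session.query(User).distinct(User.name).order_by(User.name)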
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
not using any commas. In particular, this is useful for MySQL keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`~.expression.text`
or :func:`~.expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this :class:`.Query`.
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling ``first()`` results in an execution of the underlying query.
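E.g. (``User`` being a hypothetical mapped class)::
    user = session.query(User).\\
        filter(User.name == 'ed').\\
        order_by(User.id).\\
        first()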
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that does not return object
identities.
Note that an entity query, that is, one which selects one or
more mapped classes as opposed to individual column attributes,
may ultimately represent many rows but only one row of
unique entity or entities - this is a successful result for one().
Calling ``one()`` results in an execution of the underlying query.
.. versionchanged:: 0.6
``one()`` fully fetches all results instead of applying
any kind of limit, so that the "unique"-ing of entities does not
conceal multiple object identities.
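A minimal usage sketch, catching the documented exceptions
(``User`` being a hypothetical mapped class)::
    from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
    try:
        user = session.query(User).filter(User.id == 5).one()
    except NoResultFound:
        user = None
    except MultipleResultsFound:
        raise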
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._mapper_zero_or_none(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(self, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(ent, 'is_aliased_class', False),
'expr': ent.expr
}
for ent in self._entities
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print u
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
"""
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
.. versionadded:: 0.8.1
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
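E.g. (``User`` being a hypothetical mapped class)::
    sess.query(User).filter(User.age == 25).\\
        delete(synchronize_session=False)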
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects which were in fact
deleted may still remain in the session, which can lead to confusing
results if they are accessed via get() or via already-loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised. In that case you probably
want to use the 'fetch' strategy as a fallback.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured
for any foreign key references which require it, otherwise the
database may emit an integrity violation if foreign key references
are being enforced.
After the DELETE, dependent objects in the :class:`.Session` which
were impacted by an ON DELETE may not contain the current
state, or may have been deleted. This issue is resolved once the
:class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`. Accessing an expired object
whose row has been deleted will invoke a SELECT to locate the
row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to act
upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
# TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate'):
"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\\
    update({User.age: User.age - 10}, synchronize_session='fetch')
sess.query(User).filter(User.age == 25).\\
    update({"age": User.age - 10}, synchronize_session='evaluate')
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON UPDATE CASCADE is configured for any foreign
key references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the :class:`.Session` which
were impacted by an ON UPDATE CASCADE may not contain the current
state; this issue is resolved once the :class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`.
* The method supports multiple table updates, as
detailed in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and other multiple
table mappings. However, the **join condition of an inheritance
mapper is currently not automatically rendered**.
Care must be taken in any multiple-table update to explicitly
include the joining condition between those tables, even in mappings
where this is normally automatic.
E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of
the ``Engineer`` local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\\
filter(Engineer.id == Employee.id).\\
filter(Employee.name == 'dilbert').\\
update({"engineer_type": "programmer"})
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to act
upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values)
update_op.exec_()
return update_op.rowcount
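# A minimal usage sketch of the bulk update above (assumes a configured
# Session ``sess`` and a mapped ``User`` class with an ``age`` column):
#
#     rows_updated = sess.query(User).\
#         filter(User.age > 29).\
#         update({User.age: User.age + 1}, synchronize_session=False)
#     sess.commit()  # committing expires the session, discarding stale attributes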
def _compile_context(self, labels=True):
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
else:
# "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
context.froms = context.froms
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
def __str__(self):
return str(self._compile_context().statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(self, mode):
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
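# How the legacy lockmode strings map onto the flags produced above
# (illustrative only):
#
#     LockmodeArg.parse_legacy_query(None)             # -> None (no FOR UPDATE)
#     LockmodeArg.parse_legacy_query("read")           # -> read=True,  nowait=False
#     LockmodeArg.parse_legacy_query("update")         # -> read=False, nowait=False
#     LockmodeArg.parse_legacy_query("update_nowait")  # -> read=False, nowait=True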
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
Note the newer style of using a free standing with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=query._only_load_props,
refresh_state=context.refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
else:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
if self._with_polymorphic:
poly_properties = self.mapper._iterate_polymorphic_properties(
self._with_polymorphic)
else:
poly_properties = self.mapper._polymorphic_properties
for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
value.setup(
context,
self,
self.path,
adapter,
only_load_props=query._only_load_props,
column_collection=context.primary_columns
)
if self._polymorphic_discriminator is not None and \
self._polymorphic_discriminator \
is not self.mapper.polymorphic_on:
if adapter:
pd = adapter.columns[self._polymorphic_discriminator]
else:
pd = self._polymorphic_discriminator
context.primary_columns.append(pd)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(object):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
def __init__(self, name, *exprs, **kw):
"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
"""
keyed_tuple = util.lightweight_named_tuple('result', labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
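# A sketch of the subclassing hook described above: override
# create_row_processor() so each bundle row comes back as a plain dict
# instead of a named tuple (illustrative; ``DictBundle`` is not part of
# this module):
#
#     class DictBundle(Bundle):
#         def create_row_processor(self, query, procs, labels):
#             def proc(row):
#                 return dict(zip(labels, (p(row) for p in procs)))
#             return proc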
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[ent.row_processor(query, context, result)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
self._label_name = column.key
column = column._query_clause_element()
else:
self._label_name = getattr(column, 'key', None)
if not isinstance(column, expression.ColumnElement) and \
hasattr(column, '_select_iterable'):
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
elif isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
self.entities = util.OrderedSet(
elem._annotations['parententity']
for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
self.entity_zero = list(self.entities)[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def _resolve_expr_against_query_aliases(self, query, expr, context):
return query._adapt_clause(expr, False, True)
def row_processor(self, query, context, result):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
def __str__(self):
return str(self.column)
class QueryContext(object):
multi_row_eager_loaders = False
adapter = None
froms = ()
for_update = None
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.query = query
self.session = query.session
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
| 1,260 | 1,278 |
# orm/query.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = from_obj
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
getattr(self, '_orm_only_from_obj_alias', orm_only),
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity or \
self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _mapper_zero_or_none(self):
if self._primary_entity:
return self._primary_entity.mapper
else:
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _no_select_modifiers(self, meth):
if not self._enable_assertions:
return
for attr, methname, notset in (
('_limit', 'limit()', None),
('_offset', 'offset()', None),
('_order_by', 'order_by()', False),
('_group_by', 'group_by()', False),
('_distinct', 'distinct()', False),
):
if getattr(self, attr) is not notset:
raise sa_exc.InvalidRequestError(
"Can't call Query.%s() when %s has been called" %
(meth, methname)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
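# A usage sketch for subquery() (assumes ``sess``, mapped ``User``/``Address``
# classes and ``from sqlalchemy import func``):
#
#     stmt = sess.query(Address.user_id,
#                       func.count('*').label('address_count')).\
#         group_by(Address.user_id).subquery()
#     q = sess.query(User, stmt.c.address_count).\
#         outerjoin(stmt, User.id == stmt.c.user_id)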
def cte(self, name=None, recursive=False):
"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\\
filter(Part.part=="our part").\\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.SelectBase.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`.Query.yield_per`.
"""
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
"""Yield only ``count`` rows at a time.
The purpose of this method is when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to declare
very large areas of memory which is both time consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`.Query.yield_per` method **is not compatible with most
eager loading schemes, including subqueryload and joinedload with
collections**. For this reason, it may be helpful to disable
eager loads, either unconditionally with
:meth:`.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by
:mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect
which will stream results using server side cursors
instead of pre-buffering all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True})
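# A usage sketch for yield_per() (assumes ``sess`` and a mapped ``Widget``
# class): rows are fetched and yielded in batches of 1000, with eager
# loads disabled as recommended above.
#
#     for widget in sess.query(Widget).\
#             enable_eagerloads(False).yield_per(1000):
#         handle(widget)  # placeholder for per-row processing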
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary_key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return loading.load_on_ident(self, key)
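# A sketch of the identity-map behavior described above (assumes ``sess``
# and a mapped ``User`` class):
#
#     u1 = sess.query(User).get(5)   # emits a SELECT if not already loaded
#     u2 = sess.query(User).get(5)   # no SQL, served from the identity map,
#                                    # provided the object was not expired
#     assert u1 is u2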
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
"""
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
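# A sketch of explicit correlation (assumes ``sess``, mapped ``User`` and
# ``Address`` classes, and ``from sqlalchemy import func``): the inner
# query becomes a correlated scalar subquery within the enclosing SELECT.
#
#     inner = sess.query(func.count(Address.id)).\
#         filter(Address.user_id == User.id).\
#         correlate(User).as_scalar()
#     q = sess.query(User.name, inner.label('address_count'))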
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
"""
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
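# A usage sketch for with_parent() (assumes ``sess``, mapped ``User`` and
# ``Address`` classes related via ``User.addresses``, and an already-loaded
# ``user`` instance):
#
#     addresses = sess.query(Address).\
#         with_parent(user, 'addresses').all()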
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
"""
self.session = session
def from_self(self, *entities):
"""return a Query that selects from this Query's
SELECT statement.
\*entities - optional list of entities which will replace
those being selected.
"""
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
if entities:
q._set_entities(entities)
return q
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes',
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
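# Usage sketches for values() and value() (assume ``sess``, a mapped
# ``User`` class and ``from sqlalchemy import func``):
#
#     for name, email in sess.query(User).values(User.name, User.email):
#         pass  # each iteration yields a plain column tuple, not a User
#
#     highest_id = sess.query(User).value(func.max(User.id))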
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading` for reference
documentation.
"""
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
/etc.
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
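# A hint usage sketch (assumes ``sess`` and a mapped ``User`` class; the
# index name is hypothetical). The %(name)s token is replaced with the
# rendered name of the hinted table.
#
#     q = sess.query(User).\
#         with_hint(User, "index(%(name)s ix_user_name)", dialect_name='oracle')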
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
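# The deprecated lockmode strings and their with_for_update() equivalents
# (illustrative; assumes ``sess`` and a mapped ``User`` class):
#
#     sess.query(User).with_lockmode('update')         # FOR UPDATE
#     sess.query(User).with_for_update()               # same effect
#     sess.query(User).with_lockmode('update_nowait')  # FOR UPDATE NOWAIT
#     sess.query(User).with_for_update(nowait=True)    # same effect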
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a Postgresql backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
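# Usage sketch (illustrative, not part of the original source): assuming a
# mapped class ``User`` and a Session ``sess`` (hypothetical names), a row
# can be locked for the duration of the transaction; ``read=True`` requests
# a shared lock (``FOR SHARE`` / ``LOCK IN SHARE MODE``) where the backend
# supports one::
#
#     user = sess.query(User).\
#         with_for_update(nowait=True, of=User).\
#         filter(User.id == 5).one()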
@_generative()
def params(self, *args, **kwargs):
"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. versionchanged:: 0.7.5
Multiple criteria joined by AND.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
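# Usage sketch (illustrative, not part of the original source): assuming a
# mapped class ``User`` and a Session ``sess`` (hypothetical names), the two
# filtering styles compose freely, and each call ANDs onto the existing
# criterion; string expressions go through text() with bound values supplied
# via params()::
#
#     from sqlalchemy import text
#
#     q = sess.query(User).\
#         filter_by(name='ed').\
#         filter(User.id > 5)
#
#     q = sess.query(User).\
#         filter(text("users.id < :value")).\
#         params(value=10)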
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, an existing ORDER BY setting on the Query
object can be entirely cancelled by passing ``False``
as the value - use this before calling methods where
an ORDER BY is invalid.
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
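# Usage sketch (illustrative, not part of the original source): assuming
# mapped classes ``User``/``Address`` with a ``User.addresses`` relationship
# and a Session ``sess`` (hypothetical names), group_by() and having()
# combine to filter on an aggregate::
#
#     from sqlalchemy import func
#
#     q = sess.query(User.id, func.count(Address.id)).\
#         join(User.addresses).\
#         group_by(User.id).\
#         having(func.count(Address.id) > 2).\
#         order_by(User.id)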
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
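# Usage sketch (illustrative, not part of the original source): assuming a
# mapped class ``User`` and a Session ``sess`` (hypothetical names), every
# set operation above follows the union() pattern; ORDER BY is suppressed on
# the inner queries first so that backends which disallow it inside a
# compound statement do not fail::
#
#     q1 = sess.query(User).filter(User.name.like('a%')).order_by(None)
#     q2 = sess.query(User).filter(User.name.like('b%')).order_by(None)
#
#     both = q1.union(q2).order_by(User.name).all()
#     only_a = q1.except_(q2).all()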
def join(self, *props, **kwargs):
"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\\
join(User.orders).\\
join(Order.items).\\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\\
join(User.addresses).\\
join(a_alias, User.addresses).\\
filter(Address.email_address=='[email protected]').\\
filter(a_alias.email_address=='[email protected]')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaption**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\\
where(Address.email_address.endswith("@bar.com")).\\
alias()
q = session.query(User).\\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
:meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\\
filter(Address.email_address == '[email protected]').\\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\\
join("children", aliased=True).\\
filter(Node.name == 'child 1').\\
join("children", aliased=True, from_joinpoint=True).\\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1').\\
reset_joinpoint().\\
filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
A special two-argument calling form of the form ``target, onclause``
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`.Query.outerjoin` method were called. This
flag is here to maintain consistency with the same flag as accepted
by :meth:`.FromClause.join` and other Core constructs.
.. versionadded:: 1.0.0
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`~.Query.join` is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs.keys()))
return self._join(props,
outerjoin=isouter, create_aliases=aliased,
from_joinpoint=from_joinpoint)
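# Usage sketch (illustrative, not part of the original source): assuming
# mapped classes ``User``/``Address`` with a ``User.addresses`` relationship
# and a Session ``sess`` (hypothetical names), the main calling forms look
# like::
#
#     q = sess.query(User).join(User.addresses)              # relationship
#     q = sess.query(User).join(Address)                     # target entity
#     q = sess.query(User).join(Address,
#                               User.id == Address.user_id)  # explicit ON
#     q = sess.query(User).join(User.addresses, aliased=True).\
#         filter(Address.email_address.like('%@example.com'))
#     q = sess.query(User).outerjoin(User.addresses)         # LEFT OUTER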
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
for arg1 in util.to_list(keys):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, '_of_type', None)
else:
of_type = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is None:
raise sa_exc.InvalidRequestError(
"Don't know how to join from %s; please use "
"select_from() to establish the left "
"entity/selectable of this join" % self._entities[0])
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj) and \
sql_util.selectables_overlap(
from_obj, r_info.selectable):
overlap = True
break
if (overlap or not create_aliases) and \
l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(
right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
# which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
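# Usage sketch (illustrative, not part of the original source): assuming a
# mapped class ``User`` and a Session ``sess`` (hypothetical names), the two
# methods differ in whether the entity itself is re-pointed at the given
# selectable::
#
#     from sqlalchemy import select
#
#     select_stmt = select([User]).where(User.id == 7)
#
#     # FROM list only; User still selects from the base "user" table
#     q1 = sess.query(User).select_from(select_stmt)
#
#     # entity rows are adapted to come from the subquery itself
#     q2 = sess.query(User).select_entity_from(select_stmt)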
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""apply LIMIT/OFFSET to the ``Query`` based on a "
"range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
"""
self._offset = offset
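# Usage sketch (illustrative, not part of the original source): assuming a
# mapped class ``User`` and a Session ``sess`` (hypothetical names), slice
# notation on the Query is translated into LIMIT/OFFSET, so these three
# forms are equivalent::
#
#     page = sess.query(User).order_by(User.id)[10:20]
#     page = sess.query(User).order_by(User.id).slice(10, 20).all()
#     page = sess.query(User).order_by(User.id).offset(10).limit(10).all()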
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
:param \*criterion: optional column expressions. When present,
the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
not using any commas. In particular is useful for MySQL keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`~.expression.text`
or :func:`~.expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this :class:`.Query`.
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling ``first()`` results in an execution of the underlying query.
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that does not return object
identities.
Note that an entity query, that is, one which selects one or
more mapped classes as opposed to individual column attributes,
may ultimately represent many rows but only one row of
unique entity or entities - this is a successful result for one().
Calling ``one()`` results in an execution of the underlying query.
.. versionchanged:: 0.6
``one()`` fully fetches all results instead of applying
any kind of limit, so that the "unique"-ing of entities does not
conceal multiple object identities.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
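# Usage sketch (illustrative, not part of the original source): assuming a
# mapped class ``User`` and a Session ``sess`` (hypothetical names), the
# three terminal methods differ mainly in how many rows they tolerate::
#
#     user = sess.query(User).filter_by(name='ed').first()  # row or None
#     user = sess.query(User).filter_by(id=5).one()         # exactly one,
#                                                           # else raises
#     name = sess.query(User.name).filter_by(id=5).scalar() # first column
#                                                           # of one(), or
#                                                           # None if no row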
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._mapper_zero_or_none(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(self, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(ent, 'is_aliased_class', False),
'expr': ent.expr
}
for ent in self._entities
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print u
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
"""
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
.. versionadded:: 0.8.1
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
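# Usage sketch (illustrative, not part of the original source): assuming a
# mapped class ``User`` and a Session ``sess`` (hypothetical names), count()
# wraps the query in a subquery, while func.count() and exists() keep full
# control of the emitted SQL::
#
#     from sqlalchemy import func
#
#     total = sess.query(User).filter(User.name.like('e%')).count()
#     total = sess.query(func.count(User.id)).\
#         filter(User.name.like('e%')).scalar()
#
#     q = sess.query(User).filter(User.name == 'fred')
#     has_fred = sess.query(q.exists()).scalar()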
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised. In that case you probably
want to use the 'fetch' strategy as a fallback.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured
for any foreign key references which require it, otherwise the
database may emit an integrity violation if foreign key references
are being enforced.
After the DELETE, dependent objects in the :class:`.Session` which
were impacted by an ON DELETE may not contain the current
state, or may have been deleted. This issue is resolved once the
:class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`. Accessing an expired object
whose row has been deleted will invoke a SELECT to locate the
row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to act
upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
# TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate'):
"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\\
update({User.age: User.age - 10}, synchronize_session='fetch')
sess.query(User).filter(User.age == 25).\\
update({"age": User.age - 10}, synchronize_session='evaluate')
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON UPDATE CASCADE is configured for any foreign
key references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the :class:`.Session` which
were impacted by an ON UPDATE CASCADE may not contain the current
state; this issue is resolved once the :class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`.
* The method supports multiple table updates, as
detailed in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and other multiple
table mappings. However, the **join condition of an inheritance
mapper is currently not automatically rendered**.
Care must be taken in any multiple-table update to explicitly
include the joining condition between those tables, even in mappings
where this is normally automatic.
E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of
the ``Engineer`` local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\\
filter(Engineer.id == Employee.id).\\
filter(Employee.name == 'dilbert').\\
update({"engineer_type": "programmer"})
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to act
upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values)
update_op.exec_()
return update_op.rowcount
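# Usage sketch (illustrative, not part of the original source): assuming a
# mapped class ``User`` and a Session ``sess`` (hypothetical names), bulk
# UPDATE/DELETE each emit a single statement and synchronize in-memory
# objects according to the chosen strategy::
#
#     n = sess.query(User).\
#         filter(User.name == 'squidward').\
#         delete(synchronize_session=False)
#
#     n = sess.query(User).\
#         filter(User.age > 29).\
#         update({User.age: User.age - 10}, synchronize_session='fetch')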
def _compile_context(self, labels=True):
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
else:
# "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
context.froms = context.froms
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
def __str__(self):
return str(self._compile_context().statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(self, mode):
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
Note the newer style of using a free standing with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=query._only_load_props,
refresh_state=context.refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
else:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
if self._with_polymorphic:
poly_properties = self.mapper._iterate_polymorphic_properties(
self._with_polymorphic)
else:
poly_properties = self.mapper._polymorphic_properties
for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
value.setup(
context,
self,
self.path,
adapter,
only_load_props=query._only_load_props,
column_collection=context.primary_columns
)
if self._polymorphic_discriminator is not None and \
self._polymorphic_discriminator \
is not self.mapper.polymorphic_on:
if adapter:
pd = adapter.columns[self._polymorphic_discriminator]
else:
pd = self._polymorphic_discriminator
context.primary_columns.append(pd)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(object):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
def __init__(self, name, *exprs, **kw):
"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
"""
keyed_tuple = util.lightweight_named_tuple('result', labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
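# A minimal, self-contained usage sketch for Bundle (assumptions: an
# in-memory SQLite engine and a hypothetical ``Point`` model, defined here
# purely for illustration; written against the classic 0.9/1.x API).
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Bundle, sessionmaker
Base = declarative_base()
class Point(Base):
    __tablename__ = 'point'
    id = Column(Integer, primary_key=True)
    x = Column(Integer)
    y = Column(Integer)
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Point(x=5, y=4))
session.commit()
# the bundle groups two column expressions under a single name in each row
bn = Bundle('mybundle', Point.x, Point.y)
for row in session.query(bn).filter(bn.c.x == 5):
    print(row.mybundle.x, row.mybundle.y)   # -> 5 4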
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[ent.row_processor(query, context, result)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
self._label_name = column.key
column = column._query_clause_element()
else:
self._label_name = getattr(column, 'key', None)
if not isinstance(column, expression.ColumnElement) and \
hasattr(column, '_select_iterable'):
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
elif isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
self.entities = util.OrderedSet(
elem._annotations['parententity']
for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
self.entity_zero = list(self.entities)[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def _resolve_expr_against_query_aliases(self, query, expr, context):
return query._adapt_clause(expr, False, True)
def row_processor(self, query, context, result):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
def __str__(self):
return str(self.column)
class QueryContext(object):
multi_row_eager_loaders = False
adapter = None
froms = ()
for_update = None
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.query = query
self.session = query.session
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
# orm/query.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = from_obj
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
getattr(self, '_orm_only_from_obj_alias', orm_only),
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity or \
self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _mapper_zero_or_none(self):
if self._primary_entity:
return self._primary_entity.mapper
else:
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _no_select_modifiers(self, meth):
if not self._enable_assertions:
return
for attr, methname, notset in (
('_limit', 'limit()', None),
('_offset', 'offset()', None),
('_order_by', 'order_by()', False),
('_group_by', 'group_by()', False),
('_distinct', 'distinct()', False),
):
if getattr(self, attr) is not notset:
raise sa_exc.InvalidRequestError(
"Can't call Query.%s() when %s has been called" %
(meth, methname)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels() is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
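# A small sketch (assumption: the declaratively-mapped ``User`` class below is
# hypothetical and not part of this module): the .statement accessor returns a
# Core SELECT that can be printed or compiled without executing anything.
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Query
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
q = Query(User).filter(User.name == 'ed')
print(q.statement)                 # SELECT ... FROM users WHERE users.name = :name_1
print(q.with_labels().statement)   # same SELECT with users_id / users_name labels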
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
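# Sketch (assumptions: the ``User``/``Address`` models below are hypothetical
# and not part of this module): embed one query as an aliased subquery, then
# join another entity against it using an explicit ON clause.
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Query
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
class Address(Base):
    __tablename__ = 'addresses'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))
    email = Column(String)
subq = Query(Address).filter(Address.email.like('%@example.com')).subquery()
q = Query(User).join(subq, subq.c.user_id == User.id)
print(q.statement)   # SELECT ... FROM users JOIN (SELECT ...) AS anon_1 ON ...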
def cte(self, name=None, recursive=False):
"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\\
filter(Part.part=="our part").\\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.SelectBase.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`.Query.yield_per`.
"""
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
"""Yield only ``count`` rows at a time.
The purpose of this method is when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to allocate
very large areas of memory at once, which is both time consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`.Query.yield_per` method **is not compatible with most
eager loading schemes, including subqueryload and joinedload with
collections**. For this reason, it may be helpful to disable
eager loads, either unconditionally with
:meth:`.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by
:mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect
which will stream results using server side cursors
instead of pre-buffering all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True})
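# Sketch (assumptions: in-memory SQLite and a hypothetical ``User`` model,
# neither part of this module): stream a large result in batches of 100 rows
# with eager loading disabled, as recommended above.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([User(name='u%d' % i) for i in range(1000)])
session.commit()
count = 0
for user in session.query(User).yield_per(100).enable_eagerloads(False):
    count += 1        # rows are fetched and yielded in sub-collections of 100
print(count)          # -> 1000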
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary_key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return loading.load_on_ident(self, key)
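# Sketch (assumptions: in-memory SQLite and a hypothetical ``User`` model,
# neither part of this module): get() consults the Session's identity map, so
# a repeated lookup returns the already-loaded object without new SQL.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(User(id=5, name='ed'))
session.commit()
a = session.query(User).get(5)     # emits a SELECT (the commit expired the row)
b = session.query(User).get(5)     # served from the identity map, no SQL
assert a is b
assert session.query(User).get(99) is None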
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
"""
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
"""
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
"""
self.session = session
def from_self(self, *entities):
"""return a Query that selects from this Query's
SELECT statement.
\*entities - optional list of entities which will replace
those being selected.
"""
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
if entities:
q._set_entities(entities)
return q
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes',
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
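# Sketch (assumption: the ``User`` model below is hypothetical and not part of
# this module): keep the filtering criteria of a query but swap the SELECT
# list down to a single column.
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Query
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
q = Query(User).filter(User.name.like('%ed%'))
ids_only = q.with_entities(User.id)
print(ids_only.statement)   # SELECT users.id FROM users WHERE users.name LIKE :name_1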
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading` for reference
documentation.
"""
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
/etc.
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a Postgresql backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
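# Sketch (assumption: the ``User`` model below is hypothetical and not part of
# this module): render the FOR UPDATE variant against the PostgreSQL dialect
# without executing anything.
from sqlalchemy import Column, Integer, String
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Query
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
q = Query(User).filter(User.id == 5).with_for_update(nowait=True)
print(q.statement.compile(dialect=postgresql.dialect()))
# SELECT ... FROM users WHERE users.id = %(id_1)s FOR UPDATE NOWAIT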
@_generative()
def params(self, *args, **kwargs):
"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
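# Sketch (assumption: the ``User`` model below is hypothetical and not part of
# this module): a bind parameter introduced through a textual filter()
# criterion is given its value via params().
from sqlalchemy import Column, Integer, String, text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Query
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
q = Query(User).filter(text("users.name = :nm")).params(nm='ed')
print(q.statement)   # WHERE users.name = :nm, with 'ed' bound to :nm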
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. versionchanged:: 0.7.5
Multiple criteria joined by AND.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
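# Sketch (assumption: the ``User`` model below is hypothetical and not part of
# this module): criteria passed in one filter() call, or across chained calls,
# end up joined by AND in the WHERE clause.
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Query
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
q1 = Query(User).filter(User.name == 'ed', User.id > 5)
q2 = Query(User).filter(User.name == 'ed').filter(User.id > 5)
print(q1.statement)   # WHERE users.name = :name_1 AND users.id > :id_1
print(q2.statement)   # same WHERE clause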
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
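# Sketch (assumption: the ``User`` model below is hypothetical and not part of
# this module): filter_by() takes keyword arguments resolved against the
# query's primary entity and produces the same WHERE clause as filter().
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Query
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
print(Query(User).filter_by(name='ed').statement)
print(Query(User).filter(User.name == 'ed').statement)   # equivalent SQL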
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, an existing ORDER BY setting on the Query
object can be entirely cancelled by passing ``False``
as the value - use this before calling methods where
an ORDER BY is invalid.
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
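# Sketch (assumption: the ``User`` model below is hypothetical and not part of
# this module): chained order_by() calls accumulate, while passing None
# cancels any ORDER BY, including one configured on the mapper.
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Query
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
q = Query(User).order_by(User.name).order_by(User.id.desc())
print(q.statement)                  # ORDER BY users.name, users.id DESC
print(q.order_by(None).statement)   # no ORDER BY rendered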
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
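# Sketch (assumptions: the ``User``/``Address`` models below are hypothetical
# and not part of this module): HAVING filters on an aggregate computed per
# GROUP BY group.
from sqlalchemy import Column, ForeignKey, Integer, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Query
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
class Address(Base):
    __tablename__ = 'addresses'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))
q = (Query(User.id)
     .join(Address, Address.user_id == User.id)
     .group_by(User.id)
     .having(func.count(Address.id) > 2))
print(q.statement)   # ... GROUP BY users.id HAVING count(addresses.id) > :count_1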
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
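# Sketch (assumption: the ``User`` model below is hypothetical and not part of
# this module): UNION two filtered queries; the combined query selects from
# the nested compound statement.
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Query
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
q1 = Query(User).filter(User.name == 'ed')
q2 = Query(User).filter(User.name == 'fred')
print(q1.union(q2).statement)
# SELECT ... FROM (SELECT ... UNION SELECT ...) AS anon_1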
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
def join(self, *props, **kwargs):
"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\\
join(User.orders).\\
join(Order.items).\\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\\
join(User.addresses).\\
join(a_alias, User.addresses).\\
filter(Address.email_address=='[email protected]').\\
filter(a_alias.email_address=='[email protected]')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaption**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\\
where(Address.email_address.endswith("@bar.com")).\\
alias()
q = session.query(User).\\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
:meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\\
filter(Address.email_address == '[email protected]').\\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\\
join("children", aliased=True).\\
filter(Node.name == 'child 1').\\
join("children", aliased=True, from_joinpoint=True).\\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1').\\
reset_joinpoint().\\
filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
A special two-argument calling form ``target, onclause``
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`.Query.outerjoin` method were called. This
flag is here to maintain consistency with the same flag as accepted
by :meth:`.FromClause.join` and other Core constructs.
.. versionadded:: 1.0.0
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`~.Query.join` is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs.keys()))
return self._join(props,
outerjoin=isouter, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
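E.g., reusing the ``User.addresses`` relationship from the
:meth:`~.Query.join` examples above::
    q = session.query(User).outerjoin(User.addresses)
``User`` rows without any related ``Address`` rows are retained in the
result.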
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
for arg1 in util.to_list(keys):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, '_of_type', None)
else:
of_type = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is None:
raise sa_exc.InvalidRequestError(
"Don't know how to join from %s; please use "
"select_from() to establish the left "
"entity/selectable of this join" % self._entities[0])
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj) and \
sql_util.selectables_overlap(
from_obj, r_info.selectable):
overlap = True
break
if (overlap or not create_aliases) and \
l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(
right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
# which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
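# Support indexed and sliced access to results, e.g. query[7] and
# query[2:10].  Non-negative slices are translated to LIMIT/OFFSET
# via slice(); negative indexes and bounds fall back to loading the
# full result list.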
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""apply LIMIT/OFFSET to the ``Query`` based on a "
"range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
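E.g., to return at most ten ``User`` rows, ordering first so that the
selection is deterministic::
    session.query(User).order_by(User.id).limit(10)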
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
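E.g., to skip the first five ``User`` rows::
    session.query(User).order_by(User.id).offset(5)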
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
:param \*expr: optional column expressions. When present,
the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
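E.g., using the ``User`` mapping from the examples above::
    # DISTINCT applied to the selected columns
    session.query(User.name).distinct()
    # Postgresql only: renders DISTINCT ON against the "name" column
    session.query(User).distinct(User.name)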
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
not using any commas. In particular is useful for MySQL keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`~.expression.text`
or :func:`~.expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this :class:`.Query`.
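E.g., selecting ``User`` entities from a textual statement against the
``users`` table used in the examples above::
    from sqlalchemy import text
    stmt = text("SELECT * FROM users WHERE users.name=:name")
    q = session.query(User).from_statement(stmt).params(name='ed')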
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling ``first()`` results in an execution of the underlying query.
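E.g., using the ``User`` mapping from the examples above::
    user = session.query(User).filter(User.name == 'ed').first()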
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that does not return object
identities.
Note that an entity query, that is, one which selects one or
more mapped classes as opposed to individual column attributes,
may ultimately represent many rows but only one row of
unique entity or entities - this is a successful result for one().
Calling ``one()`` results in an execution of the underlying query.
.. versionchanged:: 0.6
``one()`` fully fetches all results instead of applying
any kind of limit, so that the "unique"-ing of entities does not
conceal multiple object identities.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._mapper_zero_or_none(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(self, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(ent, 'is_aliased_class', False),
'expr': ent.expr
}
for ent in self._entities
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print(u)
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
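A sketch of typical usage, where ``cached_rows`` stands in for an
iterator previously produced by a :class:`.Query` of the same
structure as this one::
    q = session.query(User)
    merged_rows = q.merge_result(cached_rows, load=False)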
"""
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
.. versionadded:: 0.8.1
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
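E.g., mirroring the examples shown for :meth:`.Query.update`::
    sess.query(User).filter(User.age == 25).\\
        delete(synchronize_session=False)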
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised. In that case you probably
want to use the 'fetch' strategy as a fallback.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured
for any foreign key references which require it, otherwise the
database may emit an integrity violation if foreign key references
are being enforced.
After the DELETE, dependent objects in the :class:`.Session` which
were impacted by an ON DELETE may not contain the current
state, or may have been deleted. This issue is resolved once the
:class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`. Accessing an expired object
whose row has been deleted will invoke a SELECT to locate the
row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to act
upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
# TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate'):
"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\\
update({User.age: User.age - 10}, synchronize_session='fetch')
sess.query(User).filter(User.age == 25).\\
update({"age": User.age - 10}, synchronize_session='evaluate')
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON UPDATE CASCADE is configured for any foreign
key references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the :class:`.Session` which
were impacted by an ON UPDATE CASCADE may not contain the current
state; this issue is resolved once the :class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`.
* The method supports multiple table updates, as
detailed in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and other multiple
table mappings. However, the **join condition of an inheritance
mapper is currently not automatically rendered**.
Care must be taken in any multiple-table update to explicitly
include the joining condition between those tables, even in mappings
where this is normally automatic.
E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of
the ``Engineer`` local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\\
filter(Engineer.id == Employee.id).\\
filter(Employee.name == 'dilbert').\\
update({"engineer_type": "programmer"})
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to act
upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values)
update_op.exec_()
return update_op.rowcount
def _compile_context(self, labels=True):
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
else:
# "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
context.froms = context.froms
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
def __str__(self):
return str(self._compile_context().statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(self, mode):
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
Note the newer style of using a free-standing with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=query._only_load_props,
refresh_state=context.refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
else:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
if self._with_polymorphic:
poly_properties = self.mapper._iterate_polymorphic_properties(
self._with_polymorphic)
else:
poly_properties = self.mapper._polymorphic_properties
for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
value.setup(
context,
self,
self.path,
adapter,
only_load_props=query._only_load_props,
column_collection=context.primary_columns
)
if self._polymorphic_discriminator is not None and \
self._polymorphic_discriminator \
is not self.mapper.polymorphic_on:
if adapter:
pd = adapter.columns[self._polymorphic_discriminator]
else:
pd = self._polymorphic_discriminator
context.primary_columns.append(pd)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(object):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
# MASKED: __init__ function (lines 3299-3322)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
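For example, a subclass can return plain dictionaries rather than the
default keyed tuple (a sketch of the pattern described in
:ref:`bundles`)::
    class DictBundle(Bundle):
        def create_row_processor(self, query, procs, labels):
            def proc(row):
                return dict(zip(labels, (p(row) for p in procs)))
            return proc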
"""
keyed_tuple = util.lightweight_named_tuple('result', labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[ent.row_processor(query, context, result)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
self._label_name = column.key
column = column._query_clause_element()
else:
self._label_name = getattr(column, 'key', None)
if not isinstance(column, expression.ColumnElement) and \
hasattr(column, '_select_iterable'):
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
elif isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
self.entities = util.OrderedSet(
elem._annotations['parententity']
for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
self.entity_zero = list(self.entities)[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def _resolve_expr_against_query_aliases(self, query, expr, context):
return query._adapt_clause(expr, False, True)
def row_processor(self, query, context, result):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
def __str__(self):
return str(self.column)
class QueryContext(object):
multi_row_eager_loaders = False
adapter = None
froms = ()
for_update = None
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.query = query
self.session = query.session
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
|
def __init__(self, name, *exprs, **kw):
"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
| 3,299 | 3,322 |
# orm/query.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = from_obj
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
getattr(self, '_orm_only_from_obj_alias', orm_only),
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity or \
self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _mapper_zero_or_none(self):
if self._primary_entity:
return self._primary_entity.mapper
else:
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _no_select_modifiers(self, meth):
if not self._enable_assertions:
return
for attr, methname, notset in (
('_limit', 'limit()', None),
('_offset', 'offset()', None),
('_order_by', 'order_by()', False),
('_group_by', 'group_by()', False),
('_distinct', 'distinct()', False),
):
if getattr(self, attr) is not notset:
raise sa_exc.InvalidRequestError(
"Can't call Query.%s() when %s has been called" %
(meth, methname)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
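    # Illustrative sketch (not from the original source): assuming a mapped
    # class ``User`` and a ``Session`` instance named ``session``, the rendered
    # SELECT can be inspected without executing it::
    #
    #     q = session.query(User).filter(User.id == 5)
    #     print(q.statement)                  # SELECT with a bound parameter
    #     print(q.with_labels().statement)    # same, with table-qualified labels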
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
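    # Illustrative sketch (not from the original source): a per-user address
    # count embedded as a subquery, assuming mapped classes ``User`` and
    # ``Address`` plus ``from sqlalchemy import func``::
    #
    #     subq = session.query(Address.user_id,
    #                          func.count('*').label('address_count')).\
    #         group_by(Address.user_id).subquery()
    #     q = session.query(User, subq.c.address_count).\
    #         outerjoin(subq, User.id == subq.c.user_id)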
def cte(self, name=None, recursive=False):
"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\\
filter(Part.part=="our part").\\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.SelectBase.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
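    # Illustrative sketch (not from the original source): a correlated scalar
    # subquery used as a column, assuming ``User``/``Address`` mappings and
    # ``func`` imported from ``sqlalchemy``::
    #
    #     addr_count = session.query(func.count(Address.id)).\
    #         filter(Address.user_id == User.id).as_scalar()
    #     q = session.query(User.name, addr_count.label('address_count'))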
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`.Query.yield_per`.
"""
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
"""Yield only ``count`` rows at a time.
The purpose of this method is when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to declare
very large areas of memory which is both time consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`.Query.yield_per` method **is not compatible with most
eager loading schemes, including subqueryload and joinedload with
collections**. For this reason, it may be helpful to disable
eager loads, either unconditionally with
:meth:`.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by
:mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect
which will stream results using server side cursors
            instead of pre-buffering all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True})
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return loading.load_on_ident(self, key)
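    # Illustrative sketch (not from the original source): the identity-map
    # behavior described above, assuming a mapped class ``User``::
    #
    #     u1 = session.query(User).get(5)   # may emit a SELECT
    #     u2 = session.query(User).get(5)   # served from the identity map, no SQL
    #     assert u1 is u2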
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
"""
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
"""
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
"""
self.session = session
def from_self(self, *entities):
"""return a Query that selects from this Query's
SELECT statement.
\*entities - optional list of entities which will replace
those being selected.
"""
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
if entities:
q._set_entities(entities)
return q
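    # Illustrative sketch (not from the original source): filtering *within*
    # an already row-limited result, assuming a mapped class ``User``::
    #
    #     q = session.query(User).order_by(User.id).limit(10).\
    #         from_self().filter(User.name.like('%ed%'))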
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes',
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
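    # Illustrative sketch (not from the original source): quick column
    # iteration and a single scalar, assuming a mapped class ``User`` with
    # ``name``/``email`` columns and ``from sqlalchemy import func``::
    #
    #     for name, email in session.query(User).values(User.name, User.email):
    #         print(name, email)
    #     total = session.query(User).value(func.count(User.id))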
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
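    # Illustrative sketch (not from the original source): reusing an existing
    # query's criteria while swapping the SELECT list, assuming a mapped class
    # ``User`` with ``name``/``email`` columns::
    #
    #     q = session.query(User).filter(User.name.like('%ed%'))
    #     emails = q.with_entities(User.email).all()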
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading` for reference
documentation.
"""
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
/etc.
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
"""
self._execution_options = self._execution_options.union(kwargs)
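    # Illustrative sketch (not from the original source): the options given
    # here are passed through to the connection at execution time::
    #
    #     q = session.query(User).execution_options(stream_results=True)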
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a Postgresql backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
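    # Illustrative sketch (not from the original source): a shared ("read")
    # lock, assuming a mapped class ``User`` and a backend that supports it::
    #
    #     q = session.query(User).with_for_update(read=True).\
    #         filter(User.id == 5)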
@_generative()
def params(self, *args, **kwargs):
"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
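    # Illustrative sketch (not from the original source): binding parameters
    # for a textual criterion, assuming a ``users`` table, a mapped ``User``
    # class, and ``text`` imported from ``sqlalchemy``::
    #
    #     q = session.query(User).\
    #         filter(text("users.name = :name")).\
    #         params(name='ed')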
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. versionchanged:: 0.7.5
Multiple criteria joined by AND.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
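    # Illustrative sketch (not from the original source): the keyword form and
    # its expression-based equivalent, assuming a mapped class ``User``::
    #
    #     q1 = session.query(User).filter_by(name='ed', id=5)
    #     q2 = session.query(User).filter(User.name == 'ed', User.id == 5)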
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, an existing ORDER BY setting on the Query
object can be entirely cancelled by passing ``False``
as the value - use this before calling methods where
an ORDER BY is invalid.
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
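    # Illustrative sketch (not from the original source): cancelling an
    # existing ORDER BY, including any mapper-configured ordering, assuming a
    # mapped class ``User``::
    #
    #     q = session.query(User).order_by(User.name)
    #     unordered = q.order_by(None)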
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
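    # Illustrative sketch (not from the original source), assuming a mapped
    # class ``User``; ORDER BY is stripped from the component queries before
    # they are embedded in the UNION::
    #
    #     q1 = session.query(User).filter(User.name == 'ed').order_by(None)
    #     q2 = session.query(User).filter(User.name == 'fred').order_by(None)
    #     q3 = q1.union(q2)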
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
def join(self, *props, **kwargs):
"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\\
join(User.orders).\\
join(Order.items).\\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\\
join(User.addresses).\\
join(a_alias, User.addresses).\\
filter(Address.email_address=='[email protected]').\\
filter(a_alias.email_address=='[email protected]')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaption**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\\
where(Address.email_address.endswith("@bar.com")).\\
alias()
q = session.query(User).\\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
:meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\\
filter(Address.email_address == '[email protected]').\\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\\
join("children", aliased=True).\\
                filter(Node.name == 'child 1').\\
join("children", aliased=True, from_joinpoint=True).\\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1').\\
reset_joinpoint().\\
                filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
A special two-argument calling form of the form ``target, onclause``
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`.Query.outerjoin` method were called. This
flag is here to maintain consistency with the same flag as accepted
by :meth:`.FromClause.join` and other Core constructs.
.. versionadded:: 1.0.0
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`~.Query.join` is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs.keys))
isouter = isouter
return self._join(props,
outerjoin=isouter, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
for arg1 in util.to_list(keys):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, '_of_type', None)
else:
of_type = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is None:
raise sa_exc.InvalidRequestError(
"Don't know how to join from %s; please use "
"select_from() to establish the left "
"entity/selectable of this join" % self._entities[0])
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj) and \
sql_util.selectables_overlap(
from_obj, r_info.selectable):
overlap = True
break
if (overlap or not create_aliases) and \
l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(
right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
# which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
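# Usage sketch (illustrative, not part of the original source): how Python
# indexing on a Query maps to the branches above.  Assumes a configured
# ``session`` and a mapped ``User`` class, as used in the docstrings in
# this module.
#
#     q = session.query(User).order_by(User.id)
#     q[5:10]   # non-negative slice -> q.slice(5, 10), rendered as LIMIT/OFFSET
#     q[2]      # single index       -> q[2:3], returning the single entity
#     q[-1]     # negative index     -> list(q)[-1], all rows are fetched first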
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""apply LIMIT/OFFSET to the ``Query`` based on a "
"range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
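# Sketch of the LIMIT/OFFSET arithmetic performed above (illustrative only):
# successive calls compose, with offsets accumulating and the limit taken
# from the most recent range.
#
#     q = session.query(User)
#     q.slice(5, 10)              # _offset=5, _limit=5
#     q.slice(5, 10).slice(2, 4)  # _offset=7, _limit=2 -- a window of the prior window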
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
:param \*criterion: optional column expressions. When present,
the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
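# Usage sketch (illustrative): plain DISTINCT versus the criterion form
# handled above, which the Postgresql dialect renders as DISTINCT ON.
#
#     session.query(User.name).distinct()        # SELECT DISTINCT users.name ...
#     session.query(User).distinct(User.name)    # DISTINCT ON (users.name) on Postgresql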
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
not using any commas. This is particularly useful for MySQL keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`~.expression.text`
or :func:`~.expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this :class:`.Query`.
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling ``first()`` results in an execution of the underlying query.
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that does not return object
identities.
Note that an entity query, that is, one which selects one or
more mapped classes as opposed to individual column attributes,
may ultimately represent many rows but only one row of
unique entity or entities - this is a successful result for one().
Calling ``one()`` results in an execution of the underlying query.
.. versionchanged:: 0.6
``one()`` fully fetches all results instead of applying
any kind of limit, so that the "unique"-ing of entities does not
conceal multiple object identities.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._mapper_zero_or_none(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(self, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(ent, 'is_aliased_class', False),
'expr': ent.expr
}
for ent in self._entities
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print u
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
"""
return loading.merge_result(self, iterator, load)
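# Usage sketch (illustrative, with a hypothetical ``some_cache`` object):
# merge_result() re-attaches previously fetched rows to this Query's
# Session without emitting a new SELECT.
#
#     q = session.query(User).filter(User.name == 'ed')
#     cached_rows = some_cache.get('ed_users') or list(q)
#     merged = list(q.merge_result(cached_rows, load=False))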
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
.. versionadded:: 0.8.1
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised. In that case you probably
want to use the 'fetch' strategy as a fallback.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured
for any foreign key references which require it, otherwise the
database may emit an integrity violation if foreign key references
are being enforced.
After the DELETE, dependent objects in the :class:`.Session` which
were impacted by an ON DELETE may not contain the current
state, or may have been deleted. This issue is resolved once the
:class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`. Accessing an expired object
whose row has been deleted will invoke a SELECT to locate the
row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to act
upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
# TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
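# Usage sketch (illustrative): bulk DELETE with the synchronization
# strategies described in the docstring above.
#
#     session.query(User).filter(User.name == 'ed').\
#         delete(synchronize_session='fetch')
#     session.query(User).filter(User.id > 100).\
#         delete(synchronize_session=False)  # fastest; expire the session afterwards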
def update(self, values, synchronize_session='evaluate'):
"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\\
update({User.age: User.age - 10}, synchronize_session='fetch')
sess.query(User).filter(User.age == 25).\\
update({"age": User.age - 10}, synchronize_session='evaluate')
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON UPDATE CASCADE is configured for any foreign
key references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the :class:`.Session` which
were impacted by an ON UPDATE CASCADE may not contain the current
state; this issue is resolved once the :class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`.
* The method supports multiple table updates, as
detailed in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and other multiple
table mappings. However, the **join condition of an inheritance
mapper is currently not automatically rendered**.
Care must be taken in any multiple-table update to explicitly
include the joining condition between those tables, even in mappings
where this is normally automatic.
E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of
the ``Engineer`` local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\\
filter(Engineer.id == Employee.id).\\
filter(Employee.name == 'dilbert').\\
update({"engineer_type": "programmer"})
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to act
upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values)
update_op.exec_()
return update_op.rowcount
def _compile_context(self, labels=True):
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
else:
# "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
context.froms = context.froms
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
def __str__(self):
return str(self._compile_context().statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(self, mode):
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
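# Usage sketch (illustrative): the legacy strings accepted above come from
# Query.with_lockmode(); the resulting flags select the FOR UPDATE variant.
#
#     session.query(User).with_lockmode("update")         # FOR UPDATE
#     session.query(User).with_lockmode("update_nowait")  # FOR UPDATE NOWAIT
#     session.query(User).with_lockmode("read")            # FOR SHARE / LOCK IN SHARE MODE,
#                                                           # depending on dialect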
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
Note that the newer style of using a free-standing with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=query._only_load_props,
refresh_state=context.refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
else:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
if self._with_polymorphic:
poly_properties = self.mapper._iterate_polymorphic_properties(
self._with_polymorphic)
else:
poly_properties = self.mapper._polymorphic_properties
for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
value.setup(
context,
self,
self.path,
adapter,
only_load_props=query._only_load_props,
column_collection=context.primary_columns
)
if self._polymorphic_discriminator is not None and \
self._polymorphic_discriminator \
is not self.mapper.polymorphic_on:
if adapter:
pd = adapter.columns[self._polymorphic_discriminator]
else:
pd = self._polymorphic_discriminator
context.primary_columns.append(pd)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(object):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
def __init__(self, name, *exprs, **kw):
"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
"""
keyed_tuple = util.lightweight_named_tuple('result', labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
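# Subclassing sketch (illustrative, mirroring the pattern referenced by the
# docstring above): override create_row_processor() to change the per-row
# return type, e.g. a plain dict instead of the default keyed tuple.
#
#     class DictBundle(Bundle):
#         def create_row_processor(self, query, procs, labels):
#             def proc(row):
#                 return dict(zip(labels, (p(row) for p in procs)))
#             return proc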
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[ent.row_processor(query, context, result)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
self._label_name = column.key
column = column._query_clause_element()
else:
self._label_name = getattr(column, 'key', None)
if not isinstance(column, expression.ColumnElement) and \
hasattr(column, '_select_iterable'):
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
elif isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
self.entities = util.OrderedSet(
elem._annotations['parententity']
for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
self.entity_zero = list(self.entities)[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def _resolve_expr_against_query_aliases(self, query, expr, context):
return query._adapt_clause(expr, False, True)
def row_processor(self, query, context, result):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
def __str__(self):
return str(self.column)
class QueryContext(object):
multi_row_eager_loaders = False
adapter = None
froms = ()
for_update = None
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.query = query
self.session = query.session
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table; the "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
|
create_row_processor
|
Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
|
# orm/query.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = from_obj
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
getattr(self, '_orm_only_from_obj_alias', orm_only),
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity or \
self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _mapper_zero_or_none(self):
if self._primary_entity:
return self._primary_entity.mapper
else:
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _no_select_modifiers(self, meth):
if not self._enable_assertions:
return
for attr, methname, notset in (
('_limit', 'limit()', None),
('_offset', 'offset()', None),
('_order_by', 'order_by()', False),
('_group_by', 'group_by()', False),
('_distinct', 'distinct()', False),
):
if getattr(self, attr) is not notset:
raise sa_exc.InvalidRequestError(
"Can't call Query.%s() when %s has been called" %
(meth, methname)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
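# Usage sketch (illustrative): the accessor above is the usual way to
# inspect the SQL a Query will emit.
#
#     q = session.query(User).filter(User.name == 'ed')
#     print(q)                    # str() compiles the same statement
#     select_stmt = q.statement   # a Core Select, reusable e.g. via .alias()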
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
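# Illustrative usage sketch (not part of the Query implementation; assumes a
# configured Session ``session``, mapped classes ``User`` and ``Address``, and
# ``func`` imported from ``sqlalchemy``): a common use of subquery() is to
# embed a per-group aggregate which the outer query then joins to.
#
#     stmt = session.query(Address.user_id,
#                          func.count('*').label('address_count')).\
#         group_by(Address.user_id).subquery()
#     q = session.query(User, stmt.c.address_count).\
#         outerjoin(stmt, User.id == stmt.c.user_id)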
def cte(self, name=None, recursive=False):
"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\\
filter(Part.part=="our part").\\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.SelectBase.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`.Query.yield_per`.
"""
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
"""Yield only ``count`` rows at a time.
The purpose of this method is when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to allocate
very large areas of memory all at once, which is both time consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`.Query.yield_per` method **is not compatible with most
eager loading schemes, including subqueryload and joinedload with
collections**. For this reason, it may be helpful to disable
eager loads, either unconditionally with
:meth:`.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by
:mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect
which will stream results using server side cursors
instead of pre-buffer all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True})
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary_key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return loading.load_on_ident(self, key)
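# Illustrative usage sketch (assumes a configured Session ``session`` and
# mapped classes ``User`` and ``VersionedFoo``): get() takes a scalar or tuple
# primary key and may return straight from the identity map without SQL.
#
#     user = session.query(User).get(5)                # single-column key
#     foo = session.query(VersionedFoo).get((5, 10))   # composite key
#     if foo is None:
#         pass   # no row with that primary key exists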
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
"""
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
"""
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
"""
self.session = session
def from_self(self, *entities):
"""return a Query that selects from this Query's
SELECT statement.
\*entities - optional list of entities which will replace
those being selected.
"""
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
if entities:
q._set_entities(entities)
return q
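# Illustrative usage sketch (assumes a configured Session ``session`` and a
# mapped class ``User``): from_self() is the supported way to add criteria to
# a query that already has LIMIT/OFFSET applied, since filter() is rejected
# once row limiting is in place (see _no_limit_offset above).
#
#     q = session.query(User).order_by(User.id).limit(10)
#     q = q.from_self().filter(User.name.like('%ed%'))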
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes',
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
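# Illustrative usage sketch (assumes a configured Session ``session``, a mapped
# class ``User``, and ``func`` imported from ``sqlalchemy``): values() yields
# plain tuples for the given columns; value() returns a single scalar.
#
#     for user_id, name in session.query(User).values(User.id, User.name):
#         print(user_id, name)
#     max_id = session.query(User).value(func.max(User.id))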
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading` for reference
documentation.
"""
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
/etc.
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a Postgresql backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
@_generative()
def params(self, *args, **kwargs):
"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
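# Illustrative usage sketch (assumes a configured Session ``session``, a mapped
# class ``User`` backed by a ``users`` table, and ``text`` imported from
# ``sqlalchemy``): params() supplies values for bind parameters named inside a
# text()-based filter().
#
#     q = session.query(User).\
#         filter(text("users.name = :name")).\
#         params(name='ed')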
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. versionchanged:: 0.7.5
Multiple criteria joined by AND.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
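# Illustrative usage sketch (assumes a configured Session ``session`` and a
# mapped class ``User``): filter() takes SQL expressions, filter_by() takes
# keyword arguments against the current join point; both may be chained and
# are combined with AND.
#
#     q = session.query(User).\
#         filter(User.id > 5).\
#         filter_by(name='ed')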
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, an existing ORDER BY setting on the Query
object can be entirely cancelled by passing ``False``
as the value - use this before calling methods where
an ORDER BY is invalid.
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
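# Illustrative usage sketch (assumes a configured Session ``session`` and a
# mapped class ``User``): passing None cancels all ORDER BY criteria,
# including mapper-configured ordering, which is useful before wrapping the
# query in a UNION or subquery.
#
#     q = session.query(User).order_by(User.name)
#     q = q.order_by(None)    # strip every ORDER BY criterion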
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
def join(self, *props, **kwargs):
"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\\
join(User.orders).\\
join(Order.items).\\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\\
join(User.addresses).\\
join(a_alias, User.addresses).\\
filter(Address.email_address=='[email protected]').\\
filter(a_alias.email_address=='[email protected]')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaption**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\\
where(Address.email_address.endswith("@bar.com")).\\
alias()
q = session.query(User).\\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
:meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\\
filter(Address.email_address == '[email protected]').\\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\\
join("children", aliased=True).\\
filter(Node.name == 'child 1').\\
join("children", aliased=True, from_joinpoint=True).\\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1').\\
reset_joinpoint().\\
filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
A special two-argument calling form of the form ``target, onclause``
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`.Query.outerjoin` method were called. This
flag is here to maintain consistency with the same flag as accepted
by :meth:`.FromClause.join` and other Core constructs.
.. versionadded:: 1.0.0
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`~.Query.join` is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs.keys()))
return self._join(props,
outerjoin=isouter, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
for arg1 in util.to_list(keys):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, '_of_type', None)
else:
of_type = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is None:
raise sa_exc.InvalidRequestError(
"Don't know how to join from %s; please use "
"select_from() to establish the left "
"entity/selectable of this join" % self._entities[0])
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj) and \
sql_util.selectables_overlap(
from_obj, r_info.selectable):
overlap = True
break
if (overlap or not create_aliases) and \
l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(
right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
# which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""apply LIMIT/OFFSET to the ``Query`` based on a "
"range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
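# Illustrative usage sketch (assumes a configured Session ``session`` and a
# mapped class ``User``): slice(), limit()/offset(), and Python slicing of the
# Query all express the same LIMIT/OFFSET window.
#
#     session.query(User).slice(10, 20).all()
#     session.query(User).offset(10).limit(10).all()
#     session.query(User)[10:20]    # __getitem__ executes and returns a list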
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
:param \*criterion: optional column expressions. When present,
the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
not using any commas. In particular, this is useful for MySQL keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`~.expression.text`
or :func:`~.expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this :class:`.Query`.
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling ``first()`` results in an execution of the underlying query.
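e.g., an illustrative sketch (``User`` is an assumed mapped class)::
    user = session.query(User).filter(User.name == 'ed').first()
    if user is None:
        print("no user named 'ed'")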
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that does not return object
identities.
Note that an entity query, that is, one which selects one or
more mapped classes as opposed to individual column attributes,
may ultimately represent many rows but only one row of
unique entity or entities - this is a successful result for one().
Calling ``one()`` results in an execution of the underlying query.
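e.g., an illustrative sketch (``User`` is an assumed mapped class);
:class:`~sqlalchemy.orm.exc.NoResultFound` is raised when no row
matches and :class:`~sqlalchemy.orm.exc.MultipleResultsFound` when
more than one row matches::
    user = session.query(User).filter(User.id == 5).one()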
.. versionchanged:: 0.6
``one()`` fully fetches all results instead of applying
any kind of limit, so that the "unique"-ing of entities does not
conceal multiple object identities.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._mapper_zero_or_none(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(self, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(ent, 'is_aliased_class', False),
'expr': ent.expr
}
for ent in self._entities
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print u
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
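e.g., a minimal illustrative sketch (``User`` is an assumed mapped
class and ``cached_rows`` an assumed list of rows previously produced
by an identically-structured query)::
    q = session.query(User)
    merged_rows = q.merge_result(cached_rows, load=False)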
"""
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
.. versionadded:: 0.8.1
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
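E.g., an illustrative sketch (``User`` is an assumed mapped class)::
    sess.query(User).filter(User.name == 'squidward').\\
    delete(synchronize_session='fetch')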
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised. In that case you probably
want to use the 'fetch' strategy as a fallback.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured
for any foreign key references which require it, otherwise the
database may emit an integrity violation if foreign key references
are being enforced.
After the DELETE, dependent objects in the :class:`.Session` which
were impacted by an ON DELETE may not contain the current
state, or may have been deleted. This issue is resolved once the
:class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`. Accessing an expired object
whose row has been deleted will invoke a SELECT to locate the
row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to act
upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
# TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate'):
"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\
update({User.age: User.age - 10}, synchronize_session='fetch')
sess.query(User).filter(User.age == 25).\
update({"age": User.age - 10}, synchronize_session='evaluate')
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON UPDATE CASCADE is configured for any foreign
key references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the :class:`.Session` which
were impacted by an ON UPDATE CASCADE may not contain the current
state; this issue is resolved once the :class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`.
* The method supports multiple table updates, as
detailed in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and other multiple
table mappings. However, the **join condition of an inheritance
mapper is currently not automatically rendered**.
Care must be taken in any multiple-table update to explicitly
include the joining condition between those tables, even in mappings
where this is normally automatic.
E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of
the ``Engineer`` local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\\
filter(Engineer.id == Employee.id).\\
filter(Employee.name == 'dilbert').\\
update({"engineer_type": "programmer"})
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to act
upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values)
update_op.exec_()
return update_op.rowcount
def _compile_context(self, labels=True):
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
else:
# "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
context.froms = context.froms
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
def __str__(self):
return str(self._compile_context().statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(self, mode):
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
Note the newer style of using a free-standing with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=query._only_load_props,
refresh_state=context.refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
else:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
if self._with_polymorphic:
poly_properties = self.mapper._iterate_polymorphic_properties(
self._with_polymorphic)
else:
poly_properties = self.mapper._polymorphic_properties
for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
value.setup(
context,
self,
self.path,
adapter,
only_load_props=query._only_load_props,
column_collection=context.primary_columns
)
if self._polymorphic_discriminator is not None and \
self._polymorphic_discriminator \
is not self.mapper.polymorphic_on:
if adapter:
pd = adapter.columns[self._polymorphic_discriminator]
else:
pd = self._polymorphic_discriminator
context.primary_columns.append(pd)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(object):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
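For example, an illustrative subclassing sketch which returns each row
as a plain dictionary rather than a keyed tuple::
    class DictBundle(Bundle):
        def create_row_processor(self, query, procs, labels):
            def proc(row):
                return dict(zip(labels, (p(row) for p in procs)))
            return proc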
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
def __init__(self, name, *exprs, **kw):
"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
# MASKED: create_row_processor function (lines 3371-3385)
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[ent.row_processor(query, context, result)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
self._label_name = column.key
column = column._query_clause_element()
else:
self._label_name = getattr(column, 'key', None)
if not isinstance(column, expression.ColumnElement) and \
hasattr(column, '_select_iterable'):
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
elif isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
self.entities = util.OrderedSet(
elem._annotations['parententity']
for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
self.entity_zero = list(self.entities)[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def _resolve_expr_against_query_aliases(self, query, expr, context):
return query._adapt_clause(expr, False, True)
def row_processor(self, query, context, result):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
def __str__(self):
return str(self.column)
class QueryContext(object):
multi_row_eager_loaders = False
adapter = None
froms = ()
for_update = None
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.query = query
self.session = query.session
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
|
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
"""
keyed_tuple = util.lightweight_named_tuple('result', labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
| 3,371 | 3,385 |
# orm/query.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = from_obj
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
getattr(self, '_orm_only_from_obj_alias', orm_only),
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity or \
self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _mapper_zero_or_none(self):
if self._primary_entity:
return self._primary_entity.mapper
else:
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _no_select_modifiers(self, meth):
if not self._enable_assertions:
return
for attr, methname, notset in (
('_limit', 'limit()', None),
('_offset', 'offset()', None),
('_order_by', 'order_by()', False),
('_group_by', 'group_by()', False),
('_distinct', 'distinct()', False),
):
if getattr(self, attr) is not notset:
raise sa_exc.InvalidRequestError(
"Can't call Query.%s() when %s has been called" %
(meth, methname)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
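e.g., an illustrative sketch (``User`` and ``Address`` are assumed
mapped classes)::
    from sqlalchemy import func
    stmt = session.query(
        Address.user_id,
        func.count('*').label('address_count')
    ).group_by(Address.user_id).subquery()
    session.query(User, stmt.c.address_count).\\
        outerjoin(stmt, User.id == stmt.c.user_id)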
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
def cte(self, name=None, recursive=False):
"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\\
filter(Part.part=="our part").\\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.SelectBase.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
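e.g., an illustrative sketch (``User`` is an assumed mapped class)::
    from sqlalchemy import func
    user_count = session.query(func.count(User.id)).label('user_count')
    session.query(user_count)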
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`.Query.yield_per`.
"""
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
"""Yield only ``count`` rows at a time.
The purpose of this method is when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to declare
very large areas of memory which is both time consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`.Query.yield_per` method **is not compatible with most
eager loading schemes, including subqueryload and joinedload with
collections**. For this reason, it may be helpful to disable
eager loads, either unconditionally with
:meth:`.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by
:mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect
which will stream results using server side cursors
instead of pre-buffering all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True})
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary_key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return loading.load_on_ident(self, key)
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
"""
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
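# A minimal usage sketch (``User`` is a hypothetical mapped class): disable
# autoflush for a single query so pending session changes are not flushed
# before it executes:
#
#     q = session.query(User).autoflush(False).filter(User.name == 'ed')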
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
"""
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
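# A minimal usage sketch (hypothetical ``User``/``Address`` mapping with a
# ``User.addresses`` relationship; ``some_user`` is an already-loaded
# instance):
#
#     addresses = session.query(Address).\
#         with_parent(some_user, User.addresses).all()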
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
"""
self.session = session
def from_self(self, *entities):
"""return a Query that selects from this Query's
SELECT statement.
\*entities - optional list of entities which will replace
those being selected.
"""
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
if entities:
q._set_entities(entities)
return q
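# A minimal usage sketch (``User`` hypothetical): keep the LIMIT inside a
# subquery while applying further criteria against the wrapped result:
#
#     q = session.query(User).order_by(User.id).limit(5).\
#             from_self().filter(User.name.like('e%'))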
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes',
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
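# A minimal usage sketch (``User`` hypothetical): iterate plain column tuples
# with values(), or fetch a single scalar with value():
#
#     for name, in session.query(User).values(User.name):
#         print(name)
#     name = session.query(User).filter(User.id == 5).value(User.name)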
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
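# A minimal usage sketch (``User`` hypothetical): result rows become tuples of
# the primary entity plus the added column expressions:
#
#     for user, name in session.query(User).add_columns(User.name):
#         print(user, name)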
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading` for reference
documentation.
"""
return self._options(False, *args)
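# A minimal usage sketch (``User`` with a hypothetical ``addresses``
# relationship): eager-load the collection via a loader option:
#
#     from sqlalchemy.orm import joinedload
#     q = session.query(User).options(joinedload(User.addresses))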
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
/etc.
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
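# A minimal usage sketch (``User`` hypothetical; the hint text and index name
# are illustrative and dialect-specific):
#
#     q = session.query(User).\
#             with_hint(User, 'USE INDEX (ix_users_name)', dialect_name='mysql')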
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
"""
self._execution_options = self._execution_options.union(kwargs)
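# A minimal usage sketch (``User`` hypothetical): pass options through to the
# underlying Connection, e.g. requesting server-side cursors where supported:
#
#     q = session.query(User).execution_options(stream_results=True)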
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a Postgresql backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
@_generative()
def params(self, *args, **kwargs):
"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
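# A minimal usage sketch (``User`` hypothetical, ``text`` being
# :func:`sqlalchemy.text`, ``users`` an assumed table name): bind a value for
# a named parameter used inside a textual filter:
#
#     q = session.query(User).\
#             filter(text("users.name = :name")).\
#             params(name='ed')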
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. versionchanged:: 0.7.5
Multiple criteria joined by AND.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, an existing ORDER BY setting on the Query
object can be entirely cancelled by passing ``False``
as the value - use this before calling methods where
an ORDER BY is invalid.
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
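# A minimal usage sketch (``User`` hypothetical): criteria accumulate across
# calls, and passing None cancels all ordering, including mapper defaults:
#
#     q = session.query(User).order_by(User.name)   # ORDER BY users.name
#     q = q.order_by(User.id.desc())                # appends a second criterion
#     q = q.order_by(None)                          # removes all ORDER BY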
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
def join(self, *props, **kwargs):
"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\\
join(User.orders).\\
join(Order.items).\\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\\
join(User.addresses).\\
join(a_alias, User.addresses).\\
filter(Address.email_address=='[email protected]').\\
filter(a_alias.email_address=='[email protected]')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaption**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\\
where(Address.email_address.endswith("@bar.com")).\\
alias()
q = session.query(User).\\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
:meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\\
filter(Address.email_address == '[email protected]').\\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\\
join("children", aliased=True).\\
filter(Node.name == 'child 1').\\
join("children", aliased=True, from_joinpoint=True).\\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1').\\
reset_joinpoint().\\
filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
A special two-argument calling form of the form ``target, onclause``
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`.Query.outerjoin` method were called. This
flag is here to maintain consistency with the same flag as accepted
by :meth:`.FromClause.join` and other Core constructs.
.. versionadded:: 1.0.0
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`~.Query.join` is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs.keys))
isouter = isouter
return self._join(props,
outerjoin=isouter, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
for arg1 in util.to_list(keys):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, '_of_type', None)
else:
of_type = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is None:
raise sa_exc.InvalidRequestError(
"Don't know how to join from %s; please use "
"select_from() to establish the left "
"entity/selectable of this join" % self._entities[0])
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj) and \
sql_util.selectables_overlap(
from_obj, r_info.selectable):
overlap = True
break
if (overlap or not create_aliases) and \
l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(
right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
# which is intended to wrap a the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""apply LIMIT/OFFSET to the ``Query`` based on a "
"range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
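# A minimal usage sketch (``User`` hypothetical): slice(5, 10) becomes
# OFFSET 5 / LIMIT 5 (offset = start, limit = stop - start), matching the
# SQL emitted by the index-slice form ``query[5:10]``, which additionally
# executes the query immediately:
#
#     q = session.query(User).order_by(User.id).slice(5, 10)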
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
:param \*criterion: optional column expressions. When present,
the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
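# A minimal usage sketch (``User`` hypothetical): plain DISTINCT, or
# Postgresql's DISTINCT ON when column expressions are passed (on that
# backend the leading ORDER BY expressions should match):
#
#     session.query(User.name).distinct()
#     session.query(User).distinct(User.name).order_by(User.name, User.id)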
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
not using any commas. In particular is useful for MySQL keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`~.expression.text`
or :func:`~.expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this :class:`.Query`.
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling ``first()`` results in an execution of the underlying query.
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that does not return object
identities.
Note that an entity query, that is, one which selects one or
more mapped classes as opposed to individual column attributes,
may ultimately represent many rows but only one row of
unique entity or entities - this is a successful result for one().
Calling ``one()`` results in an execution of the underlying query.
.. versionchanged:: 0.6
``one()`` fully fetches all results instead of applying
any kind of limit, so that the "unique"-ing of entities does not
conceal multiple object identities.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._mapper_zero_or_none(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(self, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(ent, 'is_aliased_class', False),
'expr': ent.expr
}
for ent in self._entities
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print u
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
"""
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
.. versionadded:: 0.8.1
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised. In that case you probably
want to use the 'fetch' strategy as a fallback.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured
for any foreign key references which require it, otherwise the
database may emit an integrity violation if foreign key references
are being enforced.
After the DELETE, dependent objects in the :class:`.Session` which
were impacted by an ON DELETE may not contain the current
state, or may have been deleted. This issue is resolved once the
:class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`. Accessing an expired object
whose row has been deleted will invoke a SELECT to locate the
row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to act
upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
# TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
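# A minimal usage sketch (``User`` hypothetical): bulk-delete matching rows
# without loading them; 'fetch' pre-selects the matched objects so they can
# also be removed from the session:
#
#     session.query(User).\
#             filter(User.name == 'ed').\
#             delete(synchronize_session='fetch')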
def update(self, values, synchronize_session='evaluate'):
"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\\
update({User.age: User.age - 10}, synchronize_session='fetch')
sess.query(User).filter(User.age == 25).\\
update({"age": User.age - 10}, synchronize_session='evaluate')
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON UPDATE CASCADE is configured for any foreign
key references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the :class:`.Session` which
were impacted by an ON UPDATE CASCADE may not contain the current
state; this issue is resolved once the :class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`.
* The method supports multiple table updates, as
detailed in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and other multiple
table mappings. However, the **join condition of an inheritance
mapper is currently not automatically rendered**.
Care must be taken in any multiple-table update to explicitly
include the joining condition between those tables, even in mappings
where this is normally automatic.
E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of
the ``Engineer`` local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\\
filter(Engineer.id == Employee.id).\\
filter(Employee.name == 'dilbert').\\
update({"engineer_type": "programmer"})
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to act
upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values)
update_op.exec_()
return update_op.rowcount
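    # Illustrative usage sketch (not part of the original module): a bulk
    # UPDATE with the 'evaluate' strategy; ``User`` and ``session`` are
    # assumed to be set up as in the delete() sketch above.
    #
    #   session.add(User(name='jack'))
    #   session.commit()
    #
    #   # The criterion and the new value are evaluated in Python against
    #   # objects already present in the session, so the in-memory state
    #   # of matched instances is updated without expiring them.
    #   count = session.query(User).filter(User.name == 'jack').update(
    #       {User.name: 'john'}, synchronize_session='evaluate')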
def _compile_context(self, labels=True):
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
else:
# "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
context.froms = context.froms
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
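    # Illustrative sketch (not part of the original module): the wrapped
    # statement produced above can be observed by combining a joined eager
    # load with LIMIT, assuming hypothetical ``User``/``Address`` mappings
    # with a ``User.addresses`` relationship.
    #
    #   from sqlalchemy.orm import joinedload
    #
    #   q = session.query(User).options(joinedload(User.addresses)).limit(5)
    #   # str(q) shows the row-limited SELECT of users wrapped in an
    #   # anonymous subquery, with the eager LEFT OUTER JOIN to addresses
    #   # appended outside of it, so the LIMIT applies to User rows rather
    #   # than to the joined rows.
    #   print(q)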
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
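    # Illustrative sketch (not part of the original module): the single-
    # table-inheritance criterion added above is visible with a
    # hypothetical single-table hierarchy, reusing ``Base`` and ``session``
    # from the earlier sketches.
    #
    #   class Employee(Base):
    #       __tablename__ = 'employee'
    #       id = Column(Integer, primary_key=True)
    #       type = Column(String(20))
    #       __mapper_args__ = {'polymorphic_on': type,
    #                          'polymorphic_identity': 'employee'}
    #
    #   class Engineer(Employee):
    #       __mapper_args__ = {'polymorphic_identity': 'engineer'}
    #
    #   # The WHERE clause gains a criterion restricting employee.type to
    #   # the 'engineer' discriminator even though no filter() was applied.
    #   print(session.query(Engineer))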
def __str__(self):
return str(self._compile_context().statement)
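    # Illustrative sketch (not part of the original module): because
    # __str__() compiles the query context, printing a Query is a cheap
    # way to inspect the SELECT it would emit, without hitting the
    # database.
    #
    #   q = session.query(User).filter(User.name == 'ed')
    #   print(q)
    #   # SELECT users.id AS users_id, users.name AS users_name
    #   # FROM users
    #   # WHERE users.name = :name_1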
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(self, mode):
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
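# Illustrative sketch (not part of the original module): the legacy
# lockmode strings map onto with_for_update() arguments as parsed above;
# the exact SQL rendered (FOR UPDATE, FOR SHARE, LOCK IN SHARE MODE, ...)
# depends on the backend.
#
#   session.query(User).with_lockmode('update')         # read=False, nowait=False
#   session.query(User).with_lockmode('update_nowait')  # read=False, nowait=True
#   session.query(User).with_lockmode('read')           # read=True,  nowait=False
#
#   # The non-legacy spelling of the same requests:
#   session.query(User).with_for_update()
#   session.query(User).with_for_update(nowait=True)
#   session.query(User).with_for_update(read=True)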
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
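# Illustrative sketch (not part of the original module): the __new__()
# dispatch above can be observed on a Query's ``_entities`` list (an
# internal attribute, inspected here only for illustration).
#
#   from sqlalchemy.orm import Bundle
#
#   q = session.query(
#       User,                             # -> _MapperEntity
#       User.name,                        # -> _ColumnEntity
#       Bundle('b', User.id, User.name),  # -> _BundleEntity
#   )
#   print([type(e).__name__ for e in q._entities])
#   # ['_MapperEntity', '_ColumnEntity', '_BundleEntity']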
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
        Note the newer style of using a free-standing with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=query._only_load_props,
refresh_state=context.refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
else:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
if self._with_polymorphic:
poly_properties = self.mapper._iterate_polymorphic_properties(
self._with_polymorphic)
else:
poly_properties = self.mapper._polymorphic_properties
for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
value.setup(
context,
self,
self.path,
adapter,
only_load_props=query._only_load_props,
column_collection=context.primary_columns
)
if self._polymorphic_discriminator is not None and \
self._polymorphic_discriminator \
is not self.mapper.polymorphic_on:
if adapter:
pd = adapter.columns[self._polymorphic_discriminator]
else:
pd = self._polymorphic_discriminator
context.primary_columns.append(pd)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(object):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
def __init__(self, name, *exprs, **kw):
"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
"""
keyed_tuple = util.lightweight_named_tuple('result', labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
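# Illustrative sketch (not part of the original module): subclassing
# :class:`.Bundle` and overriding create_row_processor() so that rows are
# returned as dictionaries rather than named tuples, in the style the
# docstring refers to; ``User`` and ``session`` are assumed as above.
#
#   from sqlalchemy.orm import Bundle
#
#   class DictBundle(Bundle):
#       def create_row_processor(self, query, procs, labels):
#           def proc(row):
#               return dict(zip(labels, (p(row) for p in procs)))
#           return proc
#
#   bn = DictBundle('user', User.id, User.name)
#   for row in session.query(bn):
#       print(row.user)   # e.g. {'id': 1, 'name': 'wendy'}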
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[ent.row_processor(query, context, result)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
self._label_name = column.key
column = column._query_clause_element()
else:
self._label_name = getattr(column, 'key', None)
if not isinstance(column, expression.ColumnElement) and \
hasattr(column, '_select_iterable'):
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
elif isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
        # of FROMs for the overall expression - this keeps
        # subqueries which were built from ORM constructs from
        # leaking their entities out into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
self.entities = util.OrderedSet(
elem._annotations['parententity']
for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
self.entity_zero = list(self.entities)[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def _resolve_expr_against_query_aliases(self, query, expr, context):
return query._adapt_clause(expr, False, True)
def row_processor(self, query, context, result):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
def __str__(self):
return str(self.column)
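# Illustrative sketch (not part of the original module): plain columns and
# SQL expressions passed to query() become _ColumnEntity instances; an
# unnamed expression such as func.upper() is given a label so it can be
# located in the result rows.
#
#   from sqlalchemy import func
#
#   for name, upper_name in session.query(User.name, func.upper(User.name)):
#       print(name, upper_name)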
class QueryContext(object):
multi_row_eager_loaders = False
adapter = None
froms = ()
for_update = None
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.query = query
self.session = query.session
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)